diff --git a/.ci/bwcVersions b/.ci/bwcVersions index cfaadc5ed1e5e..144a8b71fca39 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -25,4 +25,5 @@ BWC_VERSION: - "2.10.0" - "2.10.1" - "2.11.0" + - "2.11.1" - "2.12.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4fa118e8486f1..8076adcf00ca9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @reta @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @peternied @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami @msfroh +* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c47b9e0b69256..908a032bf833e 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,6 +17,7 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] All tests pass - [ ] New functionality has been documented. - [ ] New functionality has javadoc added +- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) - [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) diff --git a/.github/workflows/add-untriaged.yml b/.github/workflows/add-untriaged.yml index 11db8b9a61f50..38de96f663051 100644 --- a/.github/workflows/add-untriaged.yml +++ b/.github/workflows/add-untriaged.yml @@ -9,7 +9,7 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/github-script@v6 + - uses: actions/github-script@v7 with: script: | github.rest.issues.addLabels({ diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 6a66ac5fb5609..87cecdf38c072 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -7,13 +7,14 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: + java: [ 11, 17, 21 ] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 - - name: Set up JDK 11 + - name: Set up JDK ${{ matrix.java }} uses: actions/setup-java@v3 with: - java-version: 11 + java-version: ${{ matrix.java }} distribution: temurin - name: Setup docker (missing on MacOS) if: runner.os == 'macos' diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 2a5e539b214d3..e6feb3b852ad0 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -27,7 +27,7 @@ jobs: ./gradlew updateSHAs - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4.7.2 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: Updating SHAs branch: ${{ github.head_ref }} @@ -40,7 +40,7 @@ jobs: ./gradlew spotlessApply - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4.7.2 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: Spotless formatting branch: ${{ github.head_ref }} @@ -54,7 
+54,7 @@ jobs: version: 'Unreleased 2.x' - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: "Update changelog" branch: ${{ github.head_ref }} diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 31578b3fc4660..8c33d41c6b2b4 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -82,10 +82,7 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :white_check_mark: - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + :white_check_mark: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) - name: Extract Test Failure if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }} @@ -108,10 +105,8 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :grey_exclamation: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + :grey_exclamation: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) ${{ env.test_failures }} + Please review all [flaky tests](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) that succeeded after retry and create an issue if one does not already exist to track the flaky failure. - name: Create Comment Failure @@ -120,12 +115,9 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} - Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. - Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? + :x: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) + + Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? 
- name: Create Issue On Push Failure if: ${{ github.event_name == 'push' && failure() }} diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index 76981276fe085..ec5893ea546a9 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -50,7 +50,7 @@ jobs: run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }} - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v2 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_ROLE }} aws-region: us-west-2 diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index b04f404b11c55..cd75eb47946a4 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -7,13 +7,14 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: + java: [ 11, 17, 21 ] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 - - name: Set up JDK 11 + - name: Set up JDK ${{ matrix.java }} uses: actions/setup-java@v3 with: - java-version: 11 + java-version: ${{ matrix.java }} distribution: temurin cache: gradle - name: Run Gradle (precommit) diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml index 8c08df269a999..93bbfb8bbeab8 100644 --- a/.github/workflows/publish-maven-snapshots.yml +++ b/.github/workflows/publish-maven-snapshots.yml @@ -26,7 +26,7 @@ jobs: java-version: 17 - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v2 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.PUBLISH_SNAPSHOTS_ROLE }} aws-region: us-east-1 diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml new file mode 100644 index 0000000000000..11998e36c2dbb --- /dev/null +++ b/.github/workflows/pull-request-checks.yml @@ -0,0 +1,28 @@ +name: Pull Request Checks + +on: + pull_request: + types: + [ + opened, + edited, + review_requested, + synchronize, + reopened, + ready_for_review, + ] + +jobs: + verify-description-checklist: + name: Verify Description Checklist + runs-on: ubuntu-latest + steps: + - uses: peternied/check-pull-request-description-checklist@v1 + with: + checklist-items: | + New functionality includes testing. + All tests pass + New functionality has been documented. + New functionality has javadoc added + Commits are signed per the DCO using --signoff + Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6b24ca9089d1..1de3d9ae044bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,10 +10,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) -- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. 
([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Remote cluster state] Upload global metadata in cluster state to remote store ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- Allow passing list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) +- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -33,7 +42,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 - Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-compress` from 1.22 to 1.23.0 - Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 - Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) @@ -46,6 +54,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -56,6 +65,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Deprecate CamelCase `PathHierarchy` tokenizer name in favor of lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) ### Deprecated @@ -80,83 +90,74 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) +- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) +- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) ### Security ## [Unreleased 2.x] ### Added -- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) -- Add metrics for thread_pool task wait time 
([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) -- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) -- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) -- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) -- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) -- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) -- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) -- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) -- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) +- [Remote Store] Add repository stats for remote store ([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Introduce ConcurrentQueryProfiler to profile queries using the concurrent segment search path, supporting concurrency during rewrite and create weight ([#10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- Update the indexRandom function to create more segments for concurrent search tests ([#10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) +- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) - Set ingest processor supports copying from one field to another ([#10529](https://github.com/opensearch-project/OpenSearch/pull/10529)) ### Dependencies -- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) -- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) -- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) -- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, 
#9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) -- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) -- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) -- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) -- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) -- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) -- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) -- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) -- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 
([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270)) +- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) +- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) +- Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271)) +- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) +- Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294)) ### Changed -- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) -- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) -- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) -- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) -- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) -- Add instrumentation in Inbound Handler. ([#100143](https://github.com/opensearch-project/OpenSearch/pull/10143)) -- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) -- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) -- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) -- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) +- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) +- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test. 
([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) +- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) +- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) ### Deprecated ### Removed -- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) +- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) ### Fixed -- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) -- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) -- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) -- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) - Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) -- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) -- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) +- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) +- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) +- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d379d78829318..4a1162cf2558b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,6 +8,7 @@ - [Developer Certificate of 
Origin](#developer-certificate-of-origin) - [Changelog](#changelog) - [Review Process](#review-process) + - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) # Contributing to OpenSearch @@ -162,3 +163,14 @@ During the PR process, expect that there will be some back-and-forth. Please try If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and usually take care of backporting it to appropriate branches ourselves. If we reject the PR, we will close the pull request with a comment explaining why. This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. + +## Troubleshooting Failing Builds + +The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does a lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this poses a challenge by making it harder to discover important issues/bugs in a straightforward way and may lead to so-called flaky tests, tests that flip randomly from success to failure without any code changes. + +If your pull request reports failing test(s) on one of the checks, please: + - look if there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question + - if not, please make sure this is not caused by your changes, run the failing test(s) locally for some time + - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with the `flaky-test` label + - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) + - as a bonus point, try to contribute by fixing the flaky test(s) diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java deleted file mode 100644 index cdbcbfc163191..0000000000000 --- a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.benchmark.time; - -import org.opensearch.common.Rounding; -import org.opensearch.common.rounding.DateTimeUnit; -import org.opensearch.common.time.DateUtils; -import org.opensearch.common.unit.TimeValue; -import org.joda.time.DateTimeZone; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Warmup; - -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.util.concurrent.TimeUnit; - -import static org.opensearch.common.Rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.Rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.QUARTER_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.YEAR_OF_CENTURY; - -@Fork(3) -@Warmup(iterations = 10) -@Measurement(iterations = 10) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.NANOSECONDS) -@State(Scope.Benchmark) -@SuppressWarnings("unused") // invoked by benchmarking framework -public class RoundingBenchmark { - - private final ZoneId zoneId = ZoneId.of("Europe/Amsterdam"); - private final DateTimeZone timeZone = DateUtils.zoneIdToDateTimeZone(zoneId); - - private long timestamp = 1548879021354L; - - private final org.opensearch.common.rounding.Rounding jodaRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.HOUR_OF_DAY - ).timeZone(timeZone).build(); - private final Rounding javaRounding = Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitJoda() { - return jodaRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitJava() { - return javaRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding jodaDayOfMonthRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.DAY_OF_MONTH - ).timeZone(timeZone).build(); - private final Rounding javaDayOfMonthRounding = Rounding.builder(DAY_OF_MONTH).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJoda() { - return jodaDayOfMonthRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJava() { - return javaDayOfMonthRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeIntervalRoundingJoda = org.opensearch.common.rounding.Rounding.builder( - TimeValue.timeValueMinutes(60) - ).timeZone(timeZone).build(); - private final Rounding timeIntervalRoundingJava = Rounding.builder(TimeValue.timeValueMinutes(60)).timeZone(zoneId).build(); - - @Benchmark - public long timeIntervalRoundingJava() { - return timeIntervalRoundingJava.round(timestamp); - } - - @Benchmark - public long timeIntervalRoundingJoda() { - return timeIntervalRoundingJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcDayOfMonthJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.DAY_OF_MONTH) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcDayOfMonthJava = Rounding.builder(DAY_OF_MONTH).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long 
timeUnitRoundingUtcDayOfMonthJava() { - return timeUnitRoundingUtcDayOfMonthJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcDayOfMonthJoda() { - return timeUnitRoundingUtcDayOfMonthJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcQuarterOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.QUARTER) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcQuarterOfYearJava = Rounding.builder(QUARTER_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJava() { - return timeUnitRoundingUtcQuarterOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJoda() { - return timeUnitRoundingUtcQuarterOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcMonthOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.MONTH_OF_YEAR) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcMonthOfYearJava = Rounding.builder(MONTH_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJava() { - return timeUnitRoundingUtcMonthOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJoda() { - return timeUnitRoundingUtcMonthOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcYearOfCenturyJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.YEAR_OF_CENTURY) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcYearOfCenturyJava = Rounding.builder(YEAR_OF_CENTURY).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJava() { - return timeUnitRoundingUtcYearOfCenturyJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJoda() { - return timeUnitRoundingUtcYearOfCenturyJoda.round(timestamp); - } -} diff --git a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java similarity index 89% rename from benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java rename to benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java index 64c0a9e1d7aa6..4e07af452968b 100644 --- a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.common; +package org.opensearch.common.round; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -27,13 +27,13 @@ @Warmup(iterations = 3, time = 1) @Measurement(iterations = 1, time = 1) @BenchmarkMode(Mode.Throughput) -public class ArrayRoundingBenchmark { +public class RoundableBenchmark { @Benchmark - public void round(Blackhole bh, Options opts) { - Rounding.Prepared rounding = opts.supplier.get(); + public void floor(Blackhole bh, Options opts) { + Roundable roundable = opts.supplier.get(); for (long key : opts.queries) { - bh.consume(rounding.round(key)); + bh.consume(roundable.floor(key)); } } @@ -90,7 +90,7 @@ public static class Options { public String distribution; public long[] queries; - public Supplier<Rounding.Prepared> supplier; + public Supplier<Roundable> supplier; @Setup public void setup() { @@ -130,10 +130,10 @@ public void setup() { switch (type) { case "binary": - supplier = () -> new Rounding.BinarySearchArrayRounding(values, size, null); + supplier = () -> new BinarySearcher(values, size); break; case "linear": - supplier = () -> new Rounding.BidirectionalLinearSearchArrayRounding(values, size, null); + supplier = () -> new BidirectionalLinearSearcher(values, size); break; default: throw new IllegalArgumentException("invalid type: " + type); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index a420c8b63b02c..1ad7e056b6ae6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin<Project> { - private static final String SYSTEM_JDK_VERSION = "11.0.20+8"; + private static final String SYSTEM_JDK_VERSION = "17.0.9+9"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.8+7"; + private static final String GRADLE_JDK_VERSION = "17.0.9+9"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. 
this is temporary until tests are per distribution diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index cb8050d1718c4..f24b61ef0d165 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -15,8 +15,9 @@ plugins { repositories { mavenCentral() } + dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.20.0" + implementation "org.apache.logging.log4j:log4j-core:2.22.0" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e54a5a1089a93..c11e0aa25fa48 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,19 +2,17 @@ opensearch = 3.0.0 lucene = 9.8.0 bundled_jdk_vendor = adoptium -bundled_jdk = 20.0.2+9 -# See please https://github.com/adoptium/temurin-build/issues/3371 -bundled_jdk_linux_ppc64le = 20+36 +bundled_jdk = 21.0.1+12 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.15.2 -jackson_databind = 2.15.2 +jackson = 2.16.0 +jackson_databind = 2.16.0 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 -log4j = 2.20.0 +log4j = 2.21.0 slf4j = 1.7.36 asm = 9.6 jettison = 1.5.4 @@ -28,9 +26,13 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.99.Final +netty = 4.1.101.Final joda = 2.12.2 +# project reactor +reactor_netty = 1.1.12 +reactor = 3.5.11 + # client dependencies httpclient5 = 5.2.1 httpcore5 = 5.2.2 @@ -68,4 +70,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.30.1 +opentelemetry = 1.31.0 +opentelemetrysemconv = 1.21.0-alpha diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index d5c1888e78b5d..b0990560b08ba 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -72,7 +71,7 @@ public void testCreateAndDeletePit() throws IOException { assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); } - public void testDeleteAllAndListAllPits() throws IOException, InterruptedException { + public void testDeleteAllAndListAllPits() throws Exception { CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); @@ -91,11 +90,9 @@ public void testDeleteAllAndListAllPits() throws IOException, InterruptedExcepti List<String> pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse.getId())); assertTrue(pits.contains(pitResponse1.getId())); - CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener<DeletePitResponse> deletePitListener = new ActionListener<>() { @Override public void onResponse(DeletePitResponse response) { - countDownLatch.countDown(); for (DeletePitInfo 
deletePitInfo : response.getDeletePitResults()) { assertTrue(deletePitInfo.isSuccessful()); } @@ -103,19 +100,20 @@ public void onResponse(DeletePitResponse response) { @Override public void onFailure(Exception e) { - countDownLatch.countDown(); if (!(e instanceof OpenSearchStatusException)) { throw new AssertionError("Delete all failed"); } } }; final CreatePitResponse pitResponse3 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); - + assertTrue(pitResponse3.getId() != null); ActionListener<GetAllPitNodesResponse> getPitsListener = new ActionListener<GetAllPitNodesResponse>() { @Override public void onResponse(GetAllPitNodesResponse response) { List<String> pits = response.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse3.getId())); + // delete all pits + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); } @Override @@ -126,11 +124,12 @@ public void onFailure(Exception e) { } }; highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + // validate no pits case - getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); - assertTrue(getAllPitResponse.getPitInfos().size() == 0); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + assertBusy(() -> { + GetAllPitNodesResponse getAllPitResponse1 = highLevelClient().getAllPits(RequestOptions.DEFAULT); + assertTrue(getAllPitResponse1.getPitInfos().size() == 0); + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + }); } } diff --git a/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 deleted file mode 100644 index ec6781b968eed..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6fe1836469a69b3ff66037c324d75fc66ef137c \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..c2b70fb4ae202 --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 @@ -0,0 +1 @@ +899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index 761478a9fdc6e..8edd6a7354a16 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -34,6 +34,16 @@ logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling logger.deprecation.appenderRef.header_warning.ref = header_warning logger.deprecation.additivity = false +appender.search_request_slowlog_json_appender.type = Console +appender.search_request_slowlog_json_appender.name = search_request_slowlog_json_appender +appender.search_request_slowlog_json_appender.layout.type = OpenSearchJsonLayout +appender.search_request_slowlog_json_appender.layout.type_name = search_request_slowlog + +logger.search_request_slowlog_logger.name = cluster.search.request.slowlog +logger.search_request_slowlog_logger.level = trace +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_json_appender.ref = search_request_slowlog_json_appender +logger.search_request_slowlog_logger.additivity = false + 
appender.index_search_slowlog_rolling.type = Console appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.layout.type = OpenSearchJsonLayout diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 952110c6c0289..1a0abcbaf9c88 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -81,7 +81,7 @@ ${error.file} # JDK 20+ Incubating Vector Module for SIMD optimizations; # disabling may reduce performance on vector optimized lucene -20:--add-modules=jdk.incubator.vector +20-:--add-modules=jdk.incubator.vector # HDFS ForkJoinPool.common() support by SecurityManager -Djava.util.concurrent.ForkJoinPool.common.threadFactory=org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index bb27aaf2e22e6..d040afae82e53 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -113,6 +113,47 @@ logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling logger.deprecation.appenderRef.header_warning.ref = header_warning logger.deprecation.additivity = false +######## Search Request Slowlog JSON #################### +appender.search_request_slowlog_json_appender.type = RollingFile +appender.search_request_slowlog_json_appender.name = search_request_slowlog_json_appender +appender.search_request_slowlog_json_appender.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs\ + .cluster_name}_index_search_slowlog.json +appender.search_request_slowlog_json_appender.filePermissions = rw-r----- +appender.search_request_slowlog_json_appender.layout.type = OpenSearchJsonLayout +appender.search_request_slowlog_json_appender.layout.type_name = search_request_slowlog +appender.search_request_slowlog_json_appender.layout.opensearchmessagefields=message,took,took_millis,phase_took,total_hits,search_type,shards,source,id + +appender.search_request_slowlog_json_appender.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs\ + .cluster_name}_index_search_slowlog-%i.json.gz +appender.search_request_slowlog_json_appender.policies.type = Policies +appender.search_request_slowlog_json_appender.policies.size.type = SizeBasedTriggeringPolicy +appender.search_request_slowlog_json_appender.policies.size.size = 1GB +appender.search_request_slowlog_json_appender.strategy.type = DefaultRolloverStrategy +appender.search_request_slowlog_json_appender.strategy.max = 4 +################################################# +######## Search Request Slowlog Log File - old style pattern #### +appender.search_request_slowlog_log_appender.type = RollingFile +appender.search_request_slowlog_log_appender.name = search_request_slowlog_log_appender +appender.search_request_slowlog_log_appender.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}\ + _index_search_slowlog.log +appender.search_request_slowlog_log_appender.filePermissions = rw-r----- +appender.search_request_slowlog_log_appender.layout.type = PatternLayout +appender.search_request_slowlog_log_appender.layout.pattern = [%d{ISO8601}][%-5p][%c{1.}] [%node_name]%marker %m%n + +appender.search_request_slowlog_log_appender.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}\ + _index_search_slowlog-%i.log.gz 
+appender.search_request_slowlog_log_appender.policies.type = Policies +appender.search_request_slowlog_log_appender.policies.size.type = SizeBasedTriggeringPolicy +appender.search_request_slowlog_log_appender.policies.size.size = 1GB +appender.search_request_slowlog_log_appender.strategy.type = DefaultRolloverStrategy +appender.search_request_slowlog_log_appender.strategy.max = 4 +################################################# +logger.search_request_slowlog_logger.name = cluster.search.request.slowlog +logger.search_request_slowlog_logger.level = trace +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_json_appender.ref = search_request_slowlog_json_appender +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_log_appender.ref = search_request_slowlog_log_appender +logger.search_request_slowlog_logger.additivity = false + ######## Search slowlog JSON #################### appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java b/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java index 50c15bb7a95a8..c2ef08e288346 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java @@ -32,13 +32,16 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.BiConsumer; /** * A {@link BiConsumer}-like interface which allows throwing checked exceptions. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedBiConsumer<T, U, E extends Exception> { void accept(T t, U u) throws E; } diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java b/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java index 9c17ad4b4ee3f..927edd1b9905a 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Function; /** @@ -39,6 +41,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedFunction<T, R, E extends Exception> { R apply(T t) throws E; } diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index e060baf6f187e..da44c6fd4dcef 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; /** @@ -43,8 +45,9 @@ * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only implicitly by defaults. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Explicit<T> { private final T value; diff --git a/libs/common/src/main/java/org/opensearch/common/TriFunction.java b/libs/common/src/main/java/org/opensearch/common/TriFunction.java index 7b1bbece68680..8594e8e2cd0c9 100644 --- a/libs/common/src/main/java/org/opensearch/common/TriFunction.java +++ b/libs/common/src/main/java/org/opensearch/common/TriFunction.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + /** * Represents a function that accepts three arguments and produces a result. * * @param <U> the type of the third argument * @param <R> the return type * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface TriFunction<S, T, U, R> { /** diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index 5c0e3f2de7708..a5d97dcd85ef7 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -32,12 +32,15 @@ package org.opensearch.common.collect; +import org.opensearch.common.annotation.PublicApi; + /** * Java 9 Tuple * todo: deprecate and remove w/ min jdk upgrade to 11? * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Tuple<V1, V2> { public static <V1, V2> Tuple<V1, V2> tuple(V1 v1, V2 v2) { diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java index 8afa48eb92c0f..31d2dcd0dba3d 100644 --- a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java +++ b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java @@ -7,12 +7,17 @@ */ package org.opensearch.common.crypto; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.Closeable; import java.util.Map; /** * Master key provider responsible for management of master keys. 
+ * + * @opensearch.experimental */ +@ExperimentalApi public interface MasterKeyProvider extends Closeable { /** diff --git a/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java b/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java index eb8a4e1382497..3095336338f7f 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java +++ b/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java @@ -8,13 +8,16 @@ package org.opensearch.common.io; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.InputStream; /** * Model composed of an input stream and the total content length of the stream * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public class InputStreamContainer { private final InputStream inputStream; diff --git a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java index 30bea6185febc..dfc4fefb9ee55 100644 --- a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java +++ b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java @@ -32,13 +32,16 @@ package org.opensearch.common.lease; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; /** * Specialization of {@link AutoCloseable} for calls that might not throw a checked exception. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Releasable extends Closeable { @Override diff --git a/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java new file mode 100644 index 0000000000000..5c3dcf2bd4708 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * It uses linear search on a sorted array of pre-computed round-down points. + * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of + * branch mispredictions and pipeline stalls, and accesses memory sequentially. + * + *
+ * <p>
+ * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present + * at either side of the array. This is helpful for time-series data where velocity increases over time, so more + * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. + * + * @opensearch.internal + */ +@InternalApi +class BidirectionalLinearSearcher implements Roundable { + private final long[] ascending; + private final long[] descending; + + BidirectionalLinearSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int len = (size + 1) >>> 1; // rounded-up to handle odd number of values + ascending = new long[len]; + descending = new long[len]; + + for (int i = 0; i < len; i++) { + ascending[i] = values[i]; + descending[i] = values[size - i - 1]; + } + } + + @Override + public long floor(long key) { + int i = 0; + for (; i < ascending.length; i++) { + if (descending[i] <= key) { + return descending[i]; + } + if (ascending[i] > key) { + assert i > 0 : "key must be greater than or equal to " + ascending[0]; + return ascending[i - 1]; + } + } + return ascending[i - 1]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java new file mode 100644 index 0000000000000..b9d76945115ed --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import java.util.Arrays; + +/** + * It uses binary search on a sorted array of pre-computed round-down points. + * + * @opensearch.internal + */ +@InternalApi +class BinarySearcher implements Roundable { + private final long[] values; + private final int size; + + BinarySearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + this.values = values; + this.size = size; + } + + @Override + public long floor(long key) { + int idx = Arrays.binarySearch(values, 0, size, key); + assert idx != -1 : "key must be greater than or equal to " + values[0]; + if (idx < 0) { + idx = -2 - idx; + } + return values[idx]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/Roundable.java b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java new file mode 100644 index 0000000000000..ae6f9b787c1e9 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Interface to round-off values. + * + * @opensearch.internal + */ +@InternalApi +@FunctionalInterface +public interface Roundable { + /** + * Returns the greatest lower bound of the given key. + * In other words, it returns the largest value such that {@code value <= key}. 
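To make the floor contract just stated concrete, here is a small usage sketch (the values are illustrative; `RoundableFactory`, added later in this diff, chooses between the two implementations):

```java
import org.opensearch.common.round.Roundable;
import org.opensearch.common.round.RoundableFactory;

public class RoundableExample {
    public static void main(String[] args) {
        long[] points = { 0, 10, 20, 30 }; // sorted round-down points
        Roundable roundable = RoundableFactory.create(points, points.length);

        System.out.println(roundable.floor(25)); // 20: greatest point <= 25
        System.out.println(roundable.floor(30)); // 30: exact matches floor to themselves
        System.out.println(roundable.floor(99)); // 30: keys past the last point floor to it
        // roundable.floor(-1) would trip the assertion: the key must be >= points[0]
    }
}
```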
+ * @param key to floor + * @return the floored value + */ + long floor(long key); +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..b7422694c3013 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: PR #9727 + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java b/libs/common/src/main/java/org/opensearch/common/round/package-info.java similarity index 72% rename from libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java rename to libs/common/src/main/java/org/opensearch/common/round/package-info.java index 9feb862a4e010..e79c4017de31b 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java +++ b/libs/common/src/main/java/org/opensearch/common/round/package-info.java @@ -7,6 +7,6 @@ */ /** - * Contains No-op implementations + * Contains classes to round-off values. */ -package org.opensearch.telemetry.tracing.http; +package org.opensearch.common.round; diff --git a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java new file mode 100644 index 0000000000000..ae9f629c59024 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.test.OpenSearchTestCase; + +public class RoundableTests extends OpenSearchTestCase { + + public void testFloor() { + int size = randomIntBetween(1, 256); + long[] values = new long[size]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; + } + + Roundable[] impls = { new BinarySearcher(values, size), new BidirectionalLinearSearcher(values, size) }; + + for (int i = 0; i < 100000; i++) { + // Index of the expected round-down point. + int idx = randomIntBetween(0, size - 1); + + // Value of the expected round-down point. 
+ long expected = values[idx]; + + // Delta between the expected and the next round-down point. + long delta = (idx < size - 1) ? (values[idx + 1] - values[idx]) : 200; + + // Adding a random delta between 0 (inclusive) and delta (exclusive) to the expected + // round-down point, which will still floor to the same value. + long key = expected + (randomNonNegativeLong() % delta); + + for (Roundable roundable : impls) { + assertEquals(expected, roundable.floor(key)); + } + } + } + + public void testFailureCases() { + Throwable throwable; + + throwable = assertThrows(IllegalArgumentException.class, () -> new BinarySearcher(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + throwable = assertThrows(IllegalArgumentException.class, () -> new BidirectionalLinearSearcher(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + + throwable = assertThrows(AssertionError.class, () -> new BinarySearcher(new long[] { 100 }, 1).floor(50)); + assertEquals("key must be greater than or equal to 100", throwable.getMessage()); + throwable = assertThrows(AssertionError.class, () -> new BidirectionalLinearSearcher(new long[] { 100 }, 1).floor(50)); + assertEquals("key must be greater than or equal to 100", throwable.getMessage()); + } +} diff --git a/libs/core/licenses/jackson-core-2.15.2.jar.sha1 b/libs/core/licenses/jackson-core-2.15.2.jar.sha1 deleted file mode 100644 index ec6781b968eed..0000000000000 --- a/libs/core/licenses/jackson-core-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6fe1836469a69b3ff66037c324d75fc66ef137c \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.16.0.jar.sha1 b/libs/core/licenses/jackson-core-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..c2b70fb4ae202 --- /dev/null +++ b/libs/core/licenses/jackson-core-2.16.0.jar.sha1 @@ -0,0 +1 @@ +899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 b/libs/core/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.21.0.jar.sha1 b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java b/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java index c2516402b0d30..26aff04b30a56 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java @@ -32,6 +32,7 @@ package org.opensearch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -40,8 +41,9 @@ /** * Unchecked exception that is translated into a {@code 400 BAD REQUEST} error when it bubbles out over HTTP. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchParseException extends OpenSearchException { public OpenSearchParseException(String msg, Object... 
args) { diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 3c210e5c7cf96..8d9ee73a02c1d 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -96,7 +96,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); - public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java index 041d8b1bffb4a..7525bfb243aae 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java @@ -32,6 +32,7 @@ package org.opensearch.core.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.transport.TransportResponse; @@ -42,6 +43,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ActionResponse extends TransportResponse { public ActionResponse() {} diff --git a/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java b/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java index 6af9ca005d171..f087322e0024c 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java @@ -32,6 +32,8 @@ package org.opensearch.core.action; +import org.opensearch.common.annotation.PublicApi; + import java.util.concurrent.atomic.AtomicBoolean; /** @@ -39,8 +41,9 @@ * that is called is only called once. Subclasses should implement notification logic with * innerOnResponse and innerOnFailure. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class NotifyOnceListener implements ActionListener { private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false); diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java index af9df51655414..960f4bec5eeb5 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.io.stream; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -41,8 +43,9 @@ * across the wire" using OpenSearch's internal protocol.
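The Writeable contract described here conventionally pairs writeTo with a StreamInput constructor that reads fields back in the same order. A minimal sketch (Point is a hypothetical class, not part of this change):

```java
import java.io.IOException;

import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

public class Point implements Writeable {
    private final int x;
    private final int y;

    public Point(int x, int y) {
        this.x = x;
        this.y = y;
    }

    // Conventional companion to writeTo(): reconstruct the object from a stream,
    // reading fields in exactly the order they were written.
    public Point(StreamInput in) throws IOException {
        this.x = in.readInt();
        this.y = in.readInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(x);
        out.writeInt(y);
    }
}
```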
If the implementer also implements equals and hashCode then a copy made by * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public interface Writeable { /** * A WriteableRegistry registers {@link Writer} methods for writing data types over a @@ -135,8 +138,11 @@ public static Class getCustomClassFromInstance(final Object value) { * out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString); * } * + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "2.8.0") interface Writer { /** @@ -161,8 +167,11 @@ interface Writer { * this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString); * } * + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "2.8.0") interface Reader { /** diff --git a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java index cd75bddd680e5..c7b9bee3cbf4d 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java +++ b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java @@ -30,6 +30,13 @@ * GitHub history for details. */ +/* + * This code is based on code from SLF4J 1.5.11 + * Copyright (c) 2004-2007 QOS.ch + * All rights reserved. + * SPDX-License-Identifier: MIT + */ + package org.opensearch.core.common.logging; import java.util.HashSet; diff --git a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java index ca5402edae59e..3a46bd4602297 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java +++ b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.text; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContentFragment; @@ -44,8 +45,9 @@ * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if * the other is requested, caches the other one in a local reference so no additional conversion will be needed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Text implements Comparable, ToXContentFragment { public static final Text[] EMPTY_ARRAY = new Text[0]; diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java index 551504ed6f719..3b5fbb7d76307 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.NetworkAddress; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}).
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TransportAddress implements Writeable, ToXContentFragment { /** diff --git a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java index e50f24417f8bc..f4d81c4ca4363 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java +++ b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java @@ -33,14 +33,16 @@ package org.opensearch.core.common.util; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.nio.ByteBuffer; /** * Abstraction of an array of byte values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ByteArray extends BigArray { /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java index 3ce8b4953b9d6..992655efec8f0 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java @@ -32,6 +32,7 @@ package org.opensearch.core.indices.breaker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Stats class encapsulating all of the different circuit breaker stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { /** An array of all the circuit breaker stats */ diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java index ee9c94f432a36..dedeb0803271f 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java @@ -32,8 +32,7 @@ package org.opensearch.core.indices.breaker; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.core.common.breaker.CircuitBreaker; @@ -41,11 +40,10 @@ * Interface for Circuit Breaker services, which provide breakers to classes * that load field data. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class CircuitBreakerService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); - protected CircuitBreakerService() {} /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java index 9207d3ea77227..ee71cf8d2ac0e 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java @@ -32,6 +32,7 @@ package org.opensearch.core.indices.breaker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Class encapsulating stats about the {@link org.opensearch.core.common.breaker.CircuitBreaker} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CircuitBreakerStats implements Writeable, ToXContentObject { /** The name of the circuit breaker */ diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java index d65f75581dd1b..e99afbb759031 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java @@ -8,11 +8,14 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Different resource stats are defined. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public enum ResourceStats { CPU("cpu_time_in_nanos"), MEMORY("memory_in_bytes"); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java index fce8cc65e9bc5..2aedff2940d83 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java @@ -8,11 +8,14 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Defines the different types of resource stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public enum ResourceStatsType { // resource stats of the worker thread reported directly from runnable. 
WORKER_STATS("worker_stats", false); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java index 2cbc3d4b2f5c3..a278b61894a65 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import java.util.Collections; import java.util.EnumMap; @@ -22,8 +23,9 @@ * It captures the resource usage information like memory, CPU about a particular execution of thread * for a specific stats type. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ResourceUsageInfo { private static final Logger logger = LogManager.getLogger(ResourceUsageInfo.class); private final EnumMap statsInfo = new EnumMap<>(ResourceStats.class); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java index 262dbe20dabda..f4cce2de820a0 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java @@ -8,11 +8,14 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Information about resource usage * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ResourceUsageMetric { private final ResourceStats stats; private final long value; diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java index d0d26550a4742..048c4a228fbd5 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java @@ -9,6 +9,7 @@ package org.opensearch.core.tasks.resourcetracker; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -29,8 +30,9 @@ * Writeable TaskResourceStats objects are used to represent resource * snapshot information about currently running task. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskResourceStats implements Writeable, ToXContentFragment { private final Map resourceUsage; private final TaskThreadUsage threadUsage; diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java index 7d6cadbef23d7..654f1c5695937 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java @@ -8,6 +8,7 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -30,8 +31,9 @@ * Writeable TaskResourceUsage objects are used to represent resource usage * information of running tasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskResourceUsage implements Writeable, ToXContentFragment { private static final ParseField CPU_TIME_IN_NANOS = new ParseField("cpu_time_in_nanos"); private static final ParseField MEMORY_IN_BYTES = new ParseField("memory_in_bytes"); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java index b593ec96e5996..abe03e3c520e0 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java @@ -8,6 +8,7 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -29,8 +30,9 @@ * Writeable TaskThreadUsage objects are used to represent thread-related resource usage of running tasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskThreadUsage implements Writeable, ToXContentFragment { private static final String THREAD_EXECUTIONS = "thread_executions"; diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java index 4b341a94256c4..703fdfdf8a784 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java @@ -8,14 +8,17 @@ package org.opensearch.core.tasks.resourcetracker; +import org.opensearch.common.annotation.PublicApi; + /** * Resource consumption information about a particular execution of thread. *
* <p>
* It captures the resource usage information about a particular execution of thread * for a specific stats type like worker_stats or response_stats etc., * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ThreadResourceInfo { private final long threadId; private volatile boolean isActive = true; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java index 79e531a542026..32bbfc600f1f0 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; @@ -47,8 +48,9 @@ /** * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AbstractObjectParser { /** diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java index d50dd2e68d890..f6e5647532bee 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java @@ -32,11 +32,16 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * Reads an object from a parser using some context. + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface ContextParser { T parse(XContentParser p, Context c) throws IOException; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java b/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java index 570a13ad8e093..a0e4027290742 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java @@ -32,12 +32,17 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Supplier; /** * Callback for notifying the creator of the {@link XContentParser} that * parsing hit a deprecated field. 
+ * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DeprecationHandler { /** * Throws an {@link UnsupportedOperationException} when parsing hits a diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java index 365b36c755dd2..04d0bce27c04f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java @@ -32,6 +32,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import java.io.IOException; @@ -83,8 +84,9 @@ * It's highly recommended to use the high level declare methods like {@link #declareString(BiConsumer, ParseField)} instead of * {@link #declareField} which can be used to implement exceptional parsing operations not covered by the high level methods. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ObjectParser extends AbstractObjectParser implements BiFunction, diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java index 90dd0cbfb9a1a..ee8dad198df09 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Map; @@ -42,15 +43,17 @@ * The output may or may not be a value object. Objects implementing {@link ToXContentObject} output a valid value * but those that don't may or may not require emitting a startObject and an endObject. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ToXContent { /** * Base parameters class * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Params { String param(String key); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java index dbc0041af42b5..1ebdd69d2b7a3 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java @@ -32,6 +32,8 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -41,7 +43,10 @@ /** * A generic abstraction on top of handling content, inspired by JSON and pull parsing. + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface XContent { /** * The type this content handles and produces. 
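Tying the xcontent pieces annotated above together, the recommended declarative-parsing style pairs ObjectParser with a fromXContent entry point. A hedged sketch (the Greeting class and its field name are hypothetical, not part of this change):

```java
import java.io.IOException;

import org.opensearch.core.ParseField;
import org.opensearch.core.xcontent.ObjectParser;
import org.opensearch.core.xcontent.XContentParser;

public class Greeting {
    private String message;

    public void setMessage(String message) {
        this.message = message;
    }

    // Declare the expected fields once; the parser is static, immutable, and reusable.
    private static final ObjectParser<Greeting, Void> PARSER =
        new ObjectParser<>("greeting", Greeting::new);

    static {
        PARSER.declareString(Greeting::setMessage, new ParseField("message"));
    }

    public static Greeting fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }
}
```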
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java index 4bfd47ccfdc94..85c3579b74cd5 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import java.io.Closeable; import java.io.IOException; @@ -53,8 +54,9 @@ * NamedXContentRegistry.EMPTY, ParserField."{\"key\" : \"value\"}"); * * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface XContentParser extends Closeable { /** diff --git a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java index b6dc0ceb1028f..828d4b7de450e 100644 --- a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java @@ -231,7 +231,10 @@ public Map parse(String inputString) { int lookAheadMatches; // start walking the input string byte by byte, look ahead for matches where needed // if a match is found jump forward to the end of the match - for (; i < input.length; i++) { + while (i < input.length) { + // start is only used to record the value of i + int start = i; + lookAheadMatches = 0; // potential match between delimiter and input string if (delimiter.length > 0 && input[i] == delimiter[0]) { @@ -283,8 +286,14 @@ public Map parse(String inputString) { delimiter = dissectPair.getDelimiter().getBytes(StandardCharsets.UTF_8); // i is always one byte after the last found delimiter, aka the start of the next value valueStart = i; + } else { + i++; } + } else { + i++; } + // i should change anyway + assert (i != start); } // the last key, grab the rest of the input (unless consecutive delimiters already grabbed the last key) // and there is no trailing delimiter diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java index 2f70c28efb1cd..fb3dec8152b4f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java @@ -10,14 +10,12 @@ import org.opensearch.common.annotation.ExperimentalApi; -import java.io.Closeable; - /** * Interface for metrics telemetry providers * * @opensearch.experimental */ @ExperimentalApi -public interface MetricsTelemetry extends MetricsRegistry, Closeable { +public interface MetricsTelemetry extends MetricsRegistry { } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java index a5d515443b54d..decbf49f795c4 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java @@ -44,23 +44,23 @@ private DefaultSpanScope(Span span, SpanScope previousSpanScope, TracerContextSt public static SpanScope create(Span span, TracerContextStorage tracerContextStorage) { final SpanScope beforeSpanScope = spanScopeThreadLocal.get(); SpanScope newSpanScope = new DefaultSpanScope(span, 
beforeSpanScope, tracerContextStorage); - spanScopeThreadLocal.set(newSpanScope); return newSpanScope; } @Override public void close() { detach(); - spanScopeThreadLocal.set(previousSpanScope); } @Override public SpanScope attach() { + spanScopeThreadLocal.set(this); tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, this.span); return this; } private void detach() { + spanScopeThreadLocal.set(previousSpanScope); if (previousSpanScope != null) { tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, previousSpanScope.getSpan()); } else { diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index 79b7e4aca6c2f..8f1a26d99e725 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -12,7 +12,7 @@ import java.io.Closeable; import java.io.IOException; -import java.util.List; +import java.util.Collection; import java.util.Map; import java.util.Optional; @@ -53,7 +53,6 @@ public Span startSpan(SpanCreationContext context) { parentSpan = getCurrentSpanInternal(); } Span span = createSpan(context, parentSpan); - setCurrentSpanInContext(span); addDefaultAttributes(span); return span; } @@ -85,12 +84,13 @@ public SpanScope withSpanInScope(Span span) { return DefaultSpanScope.create(span, tracerContextStorage).attach(); } - private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { - return tracingTelemetry.createSpan(spanCreationContext, parentSpan); + @Override + public boolean isRecording() { + return true; } - private void setCurrentSpanInContext(Span span) { - tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, span); + private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + return tracingTelemetry.createSpan(spanCreationContext, parentSpan); } /** @@ -102,7 +102,7 @@ protected void addDefaultAttributes(Span span) { } @Override - public Span startSpan(SpanCreationContext spanCreationContext, Map<String, List<String>> headers) { + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> headers) { Optional<Span> propagatedSpan = tracingTelemetry.getContextPropagator().extractFromHeaders(headers); return startSpan(spanCreationContext.parent(propagatedSpan.map(SpanContext::new).orElse(null))); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index e6d4878a5e833..9b49ca7668992 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -9,7 +9,7 @@ package org.opensearch.telemetry.tracing; import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.telemetry.tracing.http.HttpTracer; +import org.opensearch.telemetry.tracing.transport.TransportTracer; import java.io.Closeable; @@ -22,7 +22,7 @@ * @opensearch.experimental */ @ExperimentalApi -public interface Tracer extends HttpTracer, Closeable { +public interface Tracer extends TransportTracer, Closeable { /** * Starts the {@link Span} with the given {@link SpanCreationContext} * @@ -53,4 +53,10 @@ public interface Tracer extends HttpTracer, Closeable { */ SpanScope withSpanInScope(Span span); + /** + * Tells whether traces are being recorded + * @return boolean + */ + boolean isRecording(); + } diff
--git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java index 5fbc5d329e227..d7d48d1db10d6 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java @@ -10,7 +10,7 @@ import org.opensearch.common.annotation.ExperimentalApi; -import java.util.List; +import java.util.Collection; import java.util.Map; import java.util.Optional; import java.util.function.BiConsumer; @@ -36,7 +36,7 @@ public interface TracingContextPropagator { * @param headers request headers to extract the context from * @return current span */ - Optional<Span> extractFromHeaders(Map<String, List<String>> headers); + Optional<Span> extractFromHeaders(Map<String, Collection<String>> headers); /** * Injects tracing context diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index c073e8d3e766f..c57eaccf1f3df 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -16,7 +16,7 @@ import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import java.util.List; +import java.util.Collection; import java.util.Map; /** @@ -54,13 +54,18 @@ public SpanScope withSpanInScope(Span span) { return SpanScope.NO_OP; } + @Override + public boolean isRecording() { + return false; + } + @Override public void close() { } @Override - public Span startSpan(SpanCreationContext spanCreationContext, Map<String, List<String>> header) { + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> header) { return NoopSpan.INSTANCE; } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java similarity index 64% rename from libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java rename to libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java index 50d18c0a0d040..5883d7de8e83a 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java @@ -6,31 +6,31 @@ * compatible open source license. */ -package org.opensearch.telemetry.tracing.http; +package org.opensearch.telemetry.tracing.transport; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanCreationContext; -import java.util.List; +import java.util.Collection; import java.util.Map; /** - * HttpTracer helps in creating a {@link Span} which reads the incoming tracing information - * from the HttpRequest header and propagate the span accordingly. + * TransportTracer helps in creating a {@link Span} which reads the incoming tracing information + * from the HTTP or TCP transport headers and propagates the span accordingly. *
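An aside on the scope-handling change above: DefaultSpanScope.attach() now installs the span in the thread-local storage and close() restores the previous scope, so the intended calling pattern is try-with-resources. A sketch under that assumption (the tracer, span-creation context, and work callback are supplied by the caller, not part of this diff):

```java
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.SpanScope;
import org.opensearch.telemetry.tracing.Tracer;

public class TracedOperation {
    static void runTraced(Tracer tracer, SpanCreationContext spanCreationContext, Runnable work) {
        Span span = tracer.startSpan(spanCreationContext);
        try (SpanScope ignored = tracer.withSpanInScope(span)) {
            // Code here observes "span" as the current span via the thread-local scope.
            if (tracer.isRecording()) {
                // Record attributes/events only when tracing is active
                // (NoopTracer.isRecording() returns false).
            }
            work.run();
        } finally {
            span.endSpan();
        }
    }
}
```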
* <p>
* All methods on the Tracer object are multi-thread safe. * * @opensearch.experimental */ @ExperimentalApi -public interface HttpTracer { +public interface TransportTracer { /** * Start the span with propagating the tracing info from the HttpRequest header. * * @param spanCreationContext span name. - * @param header http request header. - * @return span. + * @param headers transport headers + * @return the span instance */ - Span startSpan(SpanCreationContext spanCreationContext, Map> header); + Span startSpan(SpanCreationContext spanCreationContext, Map> headers); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java new file mode 100644 index 0000000000000..87ffcc43184bb --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains HTTP or TCP transport related tracer capabilities + */ +package org.opensearch.telemetry.tracing.transport; diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 0a717e993cb81..2182b3ea28ac8 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -22,6 +22,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -35,7 +37,6 @@ public class DefaultTracerTests extends OpenSearchTestCase { private Span mockSpan; private Span mockParentSpan; - private SpanScope mockSpanScope; private ThreadPool threadPool; private ExecutorService executorService; private SpanCreationContext spanCreationContext; @@ -62,6 +63,7 @@ public void testCreateSpan() { String spanName = defaultTracer.getCurrentSpan().getSpan().getSpanName(); assertEquals("span_name", spanName); + assertTrue(defaultTracer.isRecording()); } @SuppressWarnings("unchecked") @@ -101,11 +103,11 @@ public void testCreateSpanWithAttributes() { Span span = defaultTracer.startSpan(spanCreationContext); - assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(1.0, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key1")); - assertEquals(2l, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key2")); - assertEquals(true, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key3")); - assertEquals("key4", ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key4")); + assertThat(defaultTracer.getCurrentSpan(), is(nullValue())); + assertEquals(1.0, ((MockSpan) span).getAttribute("key1")); + assertEquals(2l, ((MockSpan) span).getAttribute("key2")); + assertEquals(true, ((MockSpan) span).getAttribute("key3")); + assertEquals("key4", ((MockSpan) span).getAttribute("key4")); span.endSpan(); } @@ -120,16 +122,18 @@ public void testCreateSpanWithParent() { 
Span span = defaultTracer.startSpan(spanCreationContext, null); - SpanContext parentSpan = defaultTracer.getCurrentSpan(); - - SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, parentSpan.getSpan()); + try (final SpanScope scope = defaultTracer.withSpanInScope(span)) { + SpanContext parentSpan = defaultTracer.getCurrentSpan(); - Span span1 = defaultTracer.startSpan(spanCreationContext1); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, parentSpan.getSpan()); - assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); - span1.endSpan(); - span.endSpan(); + try (final ScopedSpan span1 = defaultTracer.startScopedSpan(spanCreationContext1)) { + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + } + } finally { + span.endSpan(); + } } @SuppressWarnings("unchecked") @@ -154,8 +158,7 @@ public void testCreateSpanWithNullParent() { Span span = defaultTracer.startSpan(spanCreationContext); - assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(null, defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + assertThat(defaultTracer.getCurrentSpan(), is(nullValue())); span.endSpan(); } @@ -402,7 +405,6 @@ private void setupMocks() { mockTracingTelemetry = mock(TracingTelemetry.class); mockSpan = mock(Span.class); mockParentSpan = mock(Span.class); - mockSpanScope = mock(SpanScope.class); mockTracerContextStorage = mock(TracerContextStorage.class); when(mockSpan.getSpanName()).thenReturn("span_name"); when(mockSpan.getSpanId()).thenReturn("span_id"); diff --git a/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 deleted file mode 100644 index ec6781b968eed..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6fe1836469a69b3ff66037c324d75fc66ef137c \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..c2b70fb4ae202 --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 @@ -0,0 +1 @@ +899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 deleted file mode 100644 index 0022265a84b68..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -baafc85c70765594add14bd93f3efd68e1945b76 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..8da478fc6013d --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 @@ -0,0 +1 @@ +35e8b7bf4fc1d078766bb155103d433ed5bb1627 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 deleted file mode 100644 index 2b8caad846fec..0000000000000 --- 
a/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16d1dd22f7d641459ed056399d4f7df0220f1176 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..3e952ffe92418 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3c422d7f3901c9a1becf9df3cf41efc68a5ab95c \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 deleted file mode 100644 index 4ad7255e2318f..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58194ff9f51915ad6bf6b6f24818232d7566418a \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..d62b5874ab023 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 @@ -0,0 +1 @@ +2033e2c5f531785d17f3a2bc31842e3bbb7983b2 \ No newline at end of file diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java index 9d1581a3a1517..453107fe4ff65 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java @@ -35,6 +35,7 @@ import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.cbor.CborXContent; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.smile.SmileXContent; @@ -49,7 +50,9 @@ /** * The content type of {@link XContent}. + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum XContentType implements MediaType { /** diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index edb8c37c2dbdd..cf2736a8583d2 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -394,7 +394,17 @@ public Map> getTokenizers() { // TODO deprecate and remove in API tokenizers.put("lowercase", XLowerCaseTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); - tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); + tokenizers.put("PathHierarchy", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + // TODO Remove "PathHierarchy" tokenizer name in 4.0 and throw exception + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + deprecationLogger.deprecate( + "PathHierarchy_tokenizer_deprecation", + "The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [path_hierarchy] instead." 
+ ); + } + return new PathHierarchyTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("pattern", PatternTokenizerFactory::new); tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); tokenizers.put("whitespace", WhitespaceTokenizerFactory::new); @@ -662,8 +672,17 @@ public List getPreConfiguredTokenizers() { } return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); - tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); - + tokenizers.add(PreConfiguredTokenizer.openSearchVersion("PathHierarchy", (version) -> { + // TODO Remove "PathHierarchy" tokenizer name in 4.0 and throw exception + if (version.onOrAfter(Version.V_3_0_0)) { + deprecationLogger.deprecate( + "PathHierarchy_tokenizer_deprecation", + "The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [path_hierarchy] instead." + ); + } + return new PathHierarchyTokenizer(); + })); return tokenizers; } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java index 1fe7c582449ec..555d6c78b6ec5 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java @@ -35,16 +35,61 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.analysis.Tokenizer; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTokenStreamTestCase; +import org.opensearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; +import java.util.Collections; public class PathHierarchyTokenizerFactoryTests extends OpenSearchTokenStreamTestCase { + private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new CommonAnalysisModulePlugin())) + .getAnalysisRegistry() + .build(idxSettings); + } + + /** + * Test that deprecated "PathHierarchy" tokenizer name is still available via {@link CommonAnalysisModulePlugin} starting in 3.x. 
+ */ + public void testPreConfiguredTokenizer() throws IOException { + + { + try ( + IndexAnalyzers indexAnalyzers = buildAnalyzers( + VersionUtils.randomVersionBetween(random(), Version.V_3_0_0, Version.CURRENT), + "PathHierarchy" + ) + ) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertTokenStreamContents(analyzer.tokenStream("dummy", "/a/b/c"), new String[] { "/a", "/a/b", "/a/b/c" }); + // Once LUCENE-12750 is fixed we can use the following testing method instead. + // Similar testing approach has been used for deprecation of (Edge)NGrams tokenizers as well. + // assertAnalyzesTo(analyzer, "/a/b/c", new String[] { "/a", "/a/b", "/a/b/c" }); + + } + } + } + public void testDefaults() throws IOException { final Index index = new Index("test", "_na_"); final Settings indexSettings = newAnalysisSettingsBuilder().build(); diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 56ed2175df60a..179de835a4105 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -298,6 +298,9 @@ --- "path_hierarchy": + - skip: + features: "allowed_warnings" + - do: indices.analyze: body: @@ -312,6 +315,8 @@ - match: { detail.tokenizer.tokens.2.token: a/b/c } - do: + allowed_warnings: + - 'The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. Please change the tokenizer name to [path_hierarchy] instead.' indices.analyze: body: text: "a/b/c" @@ -337,11 +342,13 @@ - match: { detail.tokenizer.tokens.2.token: a/b/c } - do: + allowed_warnings: + - 'The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. Please change the tokenizer name to [path_hierarchy] instead.' 
indices.analyze: body: text: "a/b/c" explain: true - tokenizer: PathHierarchy + tokenizer: PathHierarchy - length: { detail.tokenizer.tokens: 3 } - match: { detail.tokenizer.name: PathHierarchy } - match: { detail.tokenizer.tokens.0.token: a } diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java index 86d8ad2968e7f..7316847ac6046 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java @@ -87,7 +87,7 @@ protected boolean forbidPrivateIndexSettings() { */ protected void prepareGeoShapeIndexForAggregations(final Random random) throws Exception { expectedDocsCountForGeoShapes = new HashMap<>(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); final List geoshapes = new ArrayList<>(); assertAcked(prepareCreate(GEO_SHAPE_INDEX_NAME).setSettings(settings).setMapping(GEO_SHAPE_FIELD_NAME, "type" + "=geo_shape")); boolean isShapeIntersectingBB = false; @@ -136,7 +136,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep expectedDocCountsForSingleGeoPoint = new HashMap<>(); createIndex("idx_unmapped"); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put("index.number_of_shards", 4) .put("index.number_of_replicas", 0) .build(); @@ -160,7 +160,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep protected void prepareMultiValuedGeoPointIndex(final Random random) throws Exception { multiValuedExpectedDocCountsGeoPoint = new HashMap<>(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); final List cities = new ArrayList<>(); assertAcked( prepareCreate("multi_valued_idx").setSettings(settings) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java index 39c2d67ac0b85..0eab6334854ab 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java @@ -118,25 +118,15 @@ public Processor create( ) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field"); if (field.contains(".") == false) { - throw ConfigurationUtils.newConfigurationException( - ConfigurationUtils.TAG_KEY, - tag, - "field", - "field does not contain a dot" - ); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "field does not contain a dot"); } if (field.indexOf('.') == 0 || field.lastIndexOf('.') == field.length() - 1) { - throw ConfigurationUtils.newConfigurationException( - ConfigurationUtils.TAG_KEY, - tag, - "field", - "Field can't start or end 
with a dot" - ); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "Field can't start or end with a dot"); } int firstIndex = -1; for (int index = field.indexOf('.'); index != -1; index = field.indexOf('.', index + 1)) { if (index - firstIndex == 1) { - throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field", "No space between dots"); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "No space between dots"); } firstIndex = index; } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index 0f8422ea474d2..a2a51d968e078 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -98,7 +98,7 @@ public Map getProcessors(Processor.Parameters paramet processors.put(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService)); processors.put(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory()); processors.put(JsonProcessor.TYPE, new JsonProcessor.Factory()); - processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()); + processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory(parameters.scriptService)); processors.put(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()); processors.put(BytesProcessor.TYPE, new BytesProcessor.Factory()); processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)); diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java index ff3cca4ce111f..73f03b3cb2e0f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java @@ -33,10 +33,13 @@ package org.opensearch.ingest.common; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; +import org.opensearch.script.ScriptService; +import org.opensearch.script.TemplateScript; import java.util.Collections; import java.util.List; @@ -56,24 +59,24 @@ public final class KeyValueProcessor extends AbstractProcessor { private static final Pattern STRIP_BRACKETS = Pattern.compile("(^[\\(\\[<\"'])|([\\]\\)>\"']$)"); - private final String field; + private final TemplateScript.Factory field; private final String fieldSplit; private final String valueSplit; private final Set includeKeys; private final Set excludeKeys; - private final String targetField; + private final TemplateScript.Factory targetField; private final boolean ignoreMissing; private final Consumer execution; KeyValueProcessor( String tag, String description, - String field, + TemplateScript.Factory field, String fieldSplit, String valueSplit, Set includeKeys, Set excludeKeys, - String targetField, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, @@ -106,10 +109,10 @@ public final class KeyValueProcessor extends AbstractProcessor { private static Consumer buildExecution( String 
fieldSplit, String valueSplit, - String field, + TemplateScript.Factory field, Set<String> includeKeys, Set<String> excludeKeys, - String targetField, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, @@ -130,41 +133,62 @@ private static Consumer<IngestDocument> buildExecution( keyFilter = key -> includeKeys.contains(key) && excludeKeys.contains(key) == false; } } - final String fieldPathPrefix; - String keyPrefix = prefix == null ? "" : prefix; - if (targetField == null) { - fieldPathPrefix = keyPrefix; - } else { - fieldPathPrefix = targetField + "." + keyPrefix; - } - final Function<String, String> keyPrefixer; - if (fieldPathPrefix.isEmpty()) { - keyPrefixer = val -> val; - } else { - keyPrefixer = val -> fieldPathPrefix + val; - } - final Function<String, String[]> fieldSplitter = buildSplitter(fieldSplit, true); - Function<String, String[]> valueSplitter = buildSplitter(valueSplit, false); - final Function<String, String> keyTrimmer = buildTrimmer(trimKey); - final Function<String, String> bracketStrip; - if (stripBrackets) { - bracketStrip = val -> STRIP_BRACKETS.matcher(val).replaceAll(""); - } else { - bracketStrip = val -> val; - } - final Function<String, String> valueTrimmer = buildTrimmer(trimValue); + return document -> { - String value = document.getFieldValue(field, String.class, ignoreMissing); + final String fieldPathPrefix; + String keyPrefix = prefix == null ? "" : prefix; + if (targetField != null) { + String targetFieldPath = document.renderTemplate(targetField); + if (!Strings.isNullOrEmpty(targetFieldPath)) { + fieldPathPrefix = targetFieldPath + "." + keyPrefix; + } else { + fieldPathPrefix = keyPrefix; + } + } else { + fieldPathPrefix = keyPrefix; + } + + final Function<String, String> keyPrefixer; + if (fieldPathPrefix.isEmpty()) { + keyPrefixer = val -> val; + } else { + keyPrefixer = val -> fieldPathPrefix + val; + } + final Function<String, String[]> fieldSplitter = buildSplitter(fieldSplit, true); + Function<String, String[]> valueSplitter = buildSplitter(valueSplit, false); + final Function<String, String> keyTrimmer = buildTrimmer(trimKey); + final Function<String, String> bracketStrip; + if (stripBrackets) { + bracketStrip = val -> STRIP_BRACKETS.matcher(val).replaceAll(""); + } else { + bracketStrip = val -> val; + } + final Function<String, String> valueTrimmer = buildTrimmer(trimValue); + + String path = document.renderTemplate(field); + final boolean fieldPathNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathNullOrEmpty || document.hasField(path, true) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } + } + + String value = document.getFieldValue(path, String.class, ignoreMissing); if (value == null) { if (ignoreMissing) { return; } - throw new IllegalArgumentException("field [" + field + "] is null, cannot extract key-value pairs.
"); } + for (String part : fieldSplitter.apply(value)) { String[] kv = valueSplitter.apply(part); if (kv.length != 2) { - throw new IllegalArgumentException("field [" + field + "] does not contain value_split [" + valueSplit + "]"); + throw new IllegalArgumentException("field [" + path + "] does not contain value_split [" + valueSplit + "]"); } String key = keyTrimmer.apply(kv[0]); if (keyFilter.test(key)) { @@ -193,7 +217,7 @@ private static Function buildSplitter(String split, boolean fi } } - String getField() { + TemplateScript.Factory getField() { return field; } @@ -213,7 +237,7 @@ Set getExcludeKeys() { return excludeKeys; } - String getTargetField() { + TemplateScript.Factory getTargetField() { return targetField; } @@ -241,6 +265,12 @@ public String getType() { } public static class Factory implements Processor.Factory { + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public KeyValueProcessor create( Map registry, @@ -249,7 +279,13 @@ public KeyValueProcessor create( Map config ) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + TemplateScript.Factory fieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", field, scriptService); String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = null; + if (!Strings.isNullOrEmpty(targetField)) { + targetFieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "target_field", targetField, scriptService); + } + String fieldSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field_split"); String valueSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "value_split"); String trimKey = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "trim_key"); @@ -270,12 +306,12 @@ public KeyValueProcessor create( return new KeyValueProcessor( processorTag, description, - field, + fieldTemplate, fieldSplit, valueSplit, includeKeys, excludeKeys, - targetField, + targetFieldTemplate, ignoreMissing, trimKey, trimValue, diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java index ca0c0df40f009..e42a1147825d1 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java @@ -155,4 +155,28 @@ public void testNullValueWithOutIgnoreMissing() { IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); } + + public void testMatchEmptyBrackets() { + IngestDocument ingestDocument = new IngestDocument( + "_index", + "_id", + null, + null, + null, + Collections.singletonMap("message", "[foo],[bar],[]") + ); + DissectProcessor dissectProcessor = new DissectProcessor("", null, "message", "[%{a}],[%{b}],[%{c}]", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("foo", ingestDocument.getFieldValue("a", String.class)); + assertEquals("bar", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + + ingestDocument = new IngestDocument("_index", 
"_id", null, null, null, Collections.singletonMap("message", "{}{}{}{baz}")); + dissectProcessor = new DissectProcessor("", null, "message", "{%{a}}{%{b}}{%{c}}{%{d}}", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("", ingestDocument.getFieldValue("a", String.class)); + assertEquals("", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + assertEquals("baz", ingestDocument.getFieldValue("d", String.class)); + } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java index 62060a682c0cb..78972ff8d5dea 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java @@ -35,7 +35,9 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; import org.opensearch.common.util.set.Sets; +import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -48,8 +50,14 @@ public class KeyValueProcessorFactoryTests extends OpenSearchTestCase { + private KeyValueProcessor.Factory factory; + + @Before + public void init() { + factory = new KeyValueProcessor.Factory(TestTemplateService.instance()); + } + public void testCreateWithDefaults() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -57,7 +65,7 @@ public void testCreateWithDefaults() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), is(nullValue())); @@ -66,7 +74,6 @@ public void testCreateWithDefaults() throws Exception { } public void testCreateWithAllFieldsSet() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -78,17 +85,16 @@ public void testCreateWithAllFieldsSet() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), equalTo(Sets.newHashSet("a", "b"))); assertThat(processor.getExcludeKeys(), equalTo(Collections.emptySet())); - assertThat(processor.getTargetField(), equalTo("target")); + assertThat(processor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); assertTrue(processor.isIgnoreMissing()); } public void 
testCreateWithMissingField() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); String processorTag = randomAlphaOfLength(10); OpenSearchException exception = expectThrows( @@ -99,7 +105,6 @@ public void testCreateWithMissingField() { } public void testCreateWithMissingFieldSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAlphaOfLength(10); @@ -111,7 +116,6 @@ public void testCreateWithMissingFieldSplit() { } public void testCreateWithMissingValueSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java index 685a78e2e769b..5f71ea6f16a4f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java @@ -36,6 +36,7 @@ import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -51,7 +52,7 @@ public class KeyValueProcessorTests extends OpenSearchTestCase { - private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(); + private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(TestTemplateService.instance()); public void test() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); @@ -123,7 +124,12 @@ public void testMissingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); Processor processor = createKvProcessor("unknown", "&", "=", null, null, "target", false); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); - assertThat(exception.getMessage(), equalTo("field [unknown] not present as part of path [unknown]")); + assertThat(exception.getMessage(), equalTo("field [unknown] doesn't exist")); + + // when using template snippet, the resolved field path maybe empty + Processor processorWithEmptyFieldPath = createKvProcessor("", "&", "=", null, null, "target", false); + exception = expectThrows(IllegalArgumentException.class, () -> processorWithEmptyFieldPath.execute(ingestDocument)); + assertThat(exception.getMessage(), equalTo("field path cannot be null nor empty")); } public void testNullValueWithIgnoreMissing() throws Exception { diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml index 836243652b2e0..30a0a520b5c40 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml @@ -39,3 +39,151 @@ teardown: id: 1 - match: { _source.goodbye: "everybody" } - match: { _source.hello: "world" } + +--- +"Test KV Processor with template snippets": + - skip: + 
version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + get: + index: test + id: 1 + - match: { _source.zoo.goodbye: "everybody" } + - match: { _source.zoo.hello: "world" } + +--- +"Test KV Processor with non-existing field and without ignore_missing": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[unknown\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "unknown", + target: "zoo", + foo: "goodbye=everybody hello=world" + } + +--- +"Test KV Processor with non-existing field and ignore_missing": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=", + "ignore_missing": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { target: "zoo", foo: "goodbye=everybody hello=world"}} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml index 916a7fe656cc2..d90e5fbf2362b 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -84,3 +84,38 @@ teardown: } ] } + +--- +"Test dissect processor can match empty brackets": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "[%{a}][%{b}][%{c}]" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {message: "[foo][bar][]"} + + - do: + get: + index: test + id: 1 + - match: { _source.message: "[foo][bar][]" } + - match: { _source.a: "foo" } + - match: { _source.b: "bar" } + - match: { _source.c: "" } diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 
b/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index bb2f652168d5c..8ca28a905f216 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -504,10 +504,6 @@ public void testInvalidFieldMember() { } public void testSpecialValueVariable() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10079", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); // i.e. 
_value for aggregations createIndex("test"); ensureGreen("test"); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java index 5eebb9c4d60ad..a2af636ffdc8a 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java @@ -53,9 +53,9 @@ class ExpressionAggregationScript implements AggregationScript.LeafFactory { final SimpleBindings bindings; final DoubleValuesSource source; final boolean needsScore; - final ReplaceableConstDoubleValueSource specialValue; // _value + final PerThreadReplaceableConstDoubleValueSource specialValue; // _value - ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, ReplaceableConstDoubleValueSource v) { + ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, PerThreadReplaceableConstDoubleValueSource v) { exprScript = e; bindings = b; source = exprScript.getDoubleValuesSource(bindings); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 11b2e20eea523..5629b3b4a6972 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -316,14 +316,14 @@ private static AggregationScript.LeafFactory newAggregationScript( // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); boolean needsScores = false; - ReplaceableConstDoubleValueSource specialValue = null; + PerThreadReplaceableConstDoubleValueSource specialValue = null; for (String variable : expr.variables) { try { if (variable.equals("_score")) { bindings.add("_score", DoubleValuesSource.SCORES); needsScores = true; } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); + specialValue = new PerThreadReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a @@ -388,7 +388,7 @@ private static ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLoo // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); - ReplaceableConstDoubleValueSource specialValue = null; + PerThreadReplaceableConstDoubleValueSource specialValue = null; boolean needsScores = false; for (String variable : expr.variables) { try { @@ -396,7 +396,7 @@ private static ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLoo bindings.add("_score", DoubleValuesSource.SCORES); needsScores = true; } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); + specialValue = new PerThreadReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings // 
TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java similarity index 62% rename from modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java rename to modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java index 28e4707a07192..40bb957c248f2 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java @@ -39,20 +39,25 @@ import org.apache.lucene.search.IndexSearcher; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** - * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. + * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. This is made + * thread-safe for the concurrent segment search use case by keeping the {@link DoubleValues} per thread. Any update to the value happens in + * a thread-specific {@link DoubleValuesSource} instance. */ -final class ReplaceableConstDoubleValueSource extends DoubleValuesSource { - final ReplaceableConstDoubleValues fv; +final class PerThreadReplaceableConstDoubleValueSource extends DoubleValuesSource { + // Multiple slices can be processed by the same thread, but only sequentially, so keeping the values per thread is fine + final Map<Long, ReplaceableConstDoubleValues> perThreadDoubleValues; - ReplaceableConstDoubleValueSource() { - fv = new ReplaceableConstDoubleValues(); + PerThreadReplaceableConstDoubleValueSource() { + perThreadDoubleValues = new ConcurrentHashMap<>(); } @Override public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException { - return fv; + return perThreadDoubleValues.computeIfAbsent(Thread.currentThread().getId(), threadId -> new ReplaceableConstDoubleValues()); } @Override @@ -62,7 +67,11 @@ public boolean needsScores() { @Override public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { - if (fv.advanceExact(docId)) return Explanation.match((float) fv.doubleValue(), "ReplaceableConstDoubleValues"); + final ReplaceableConstDoubleValues currentFv = perThreadDoubleValues.computeIfAbsent( + Thread.currentThread().getId(), + threadId -> new ReplaceableConstDoubleValues() + ); + if (currentFv.advanceExact(docId)) return Explanation.match((float) currentFv.doubleValue(), "ReplaceableConstDoubleValues"); else return Explanation.noMatch("ReplaceableConstDoubleValues"); } @@ -77,7 +86,11 @@ public int hashCode() { } public void setValue(double v) { - fv.setValue(v); + final ReplaceableConstDoubleValues currentFv = perThreadDoubleValues.computeIfAbsent( + Thread.currentThread().getId(), + threadId -> new ReplaceableConstDoubleValues() + ); + currentFv.setValue(v); } @Override diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java deleted file mode 100644 index
604c233ca49c4..0000000000000 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec; - -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.refresh.RefreshResponse; -import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.Segment; -import org.opensearch.index.reindex.BulkByScrollResponse; -import org.opensearch.index.reindex.ReindexAction; -import org.opensearch.index.reindex.ReindexModulePlugin; -import org.opensearch.index.reindex.ReindexRequestBuilder; -import org.opensearch.index.reindex.ReindexTestCase; -import org.opensearch.plugins.Plugin; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.stream.Collectors.toList; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - -public class MultiCodecReindexIT extends ReindexTestCase { - - @Override - protected Collection> nodePlugins() { - return List.of(ReindexModulePlugin.class); - } - - public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { - internalCluster().ensureAtLeastNumDataNodes(1); - Map codecMap = Map.of( - "best_compression", - "BEST_COMPRESSION", - "zlib", - "BEST_COMPRESSION", - "default", - "BEST_SPEED", - "lz4", - "BEST_SPEED" - ); - - for (Map.Entry codec : codecMap.entrySet()) { - assertReindexingWithMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); - } - - } - - private void assertReindexingWithMultipleCodecs(String destCodec, String destCodecMode, Map codecMap) - throws ExecutionException, InterruptedException { - - final String index = "test-index" + destCodec; - final String destIndex = "dest-index" + destCodec; - - // creating source index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", "default") - .put("index.merge.policy.max_merged_segment", "1b") - .build() - ); - ensureGreen(index); - - final int nbDocs = randomIntBetween(2, 5); - - // indexing with all 4 codecs - for (Map.Entry codec : codecMap.entrySet()) { - useCodec(index, codec.getKey()); - ingestDocs(index, nbDocs); - } - - assertTrue( - 
getSegments(index).stream() - .flatMap(s -> s.getAttributes().values().stream()) - .collect(Collectors.toSet()) - .containsAll(codecMap.values()) - ); - - // creating destination index with destination codec - createIndex( - destIndex, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", destCodec) - .build() - ); - - BulkByScrollResponse bulkResponse = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source(index) - .destination(destIndex) - .refresh(true) - .waitForActiveShards(ActiveShardCount.ONE) - .get(); - - assertEquals(codecMap.size() * nbDocs, bulkResponse.getCreated()); - assertEquals(codecMap.size() * nbDocs, bulkResponse.getTotal()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode))); - } - - private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); - - assertAcked( - client().admin() - .indices() - .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); - } - - private void flushAndRefreshIndex(String index) { - - // Request is not blocked - for (String blockSetting : Arrays.asList( - SETTING_BLOCKS_READ, - SETTING_BLOCKS_WRITE, - SETTING_READ_ONLY, - SETTING_BLOCKS_METADATA, - SETTING_READ_ONLY_ALLOW_DELETE - )) { - try { - enableIndexBlock(index, blockSetting); - // flush - FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); - assertNoFailures(flushResponse); - - // refresh - RefreshResponse refreshResponse = client().admin().indices().prepareRefresh(index).execute().actionGet(); - assertNoFailures(refreshResponse); - } finally { - disableIndexBlock(index, blockSetting); - } - } - } - - private void ingestDocs(String index, int nbDocs) throws InterruptedException { - - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) - .collect(toList()) - ); - flushAndRefreshIndex(index); - } - - private ArrayList getSegments(String index) { - - return new ArrayList<>( - client().admin() - .indices() - .segments(new IndicesSegmentsRequest(index)) - .actionGet() - .getIndices() - .get(index) - .getShards() - .get(0) - .getShards()[0].getSegments() - ); - } - -} diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java new file mode 100644 index 0000000000000..53a0545fd2ff7 --- /dev/null +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a 
+ * compatible open source license. + */ + +package org.opensearch.index.reindex; + +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.MergePolicyProvider; +import org.opensearch.index.engine.Segment; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalSettingsPlugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class MultiCodecReindexTests extends ReindexTestCase { + final static Map<String, String> codecMap = Map.of( + "best_compression", + "BEST_COMPRESSION", + "zlib", + "BEST_COMPRESSION", + "default", + "BEST_SPEED", + "lz4", + "BEST_SPEED" + ); + final static String[] codecChoices = codecMap.keySet().toArray(String[]::new); + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(InternalSettingsPlugin.class, ReindexModulePlugin.class); + } + + public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { + for (Map.Entry<String, String> candidate : codecMap.entrySet()) { + final int nbDocs = randomIntBetween(2, 5); + + final String destCodec = candidate.getKey(); + final String destCodecMode = candidate.getValue(); + + final String index = "test-index-" + destCodec; + final String destIndex = "dest-index-" + destCodec; + + // create source index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(codecChoices)) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) + .build() + ); + ensureGreen(index); + + // index using all codecs + for (String codec : codecMap.keySet()) { + useCodec(index, codec); + ingestDocs(index, nbDocs); + } + + assertTrue( + getSegments(index).stream() + .flatMap(s -> s.getAttributes().values().stream()) + .collect(Collectors.toSet()) + .containsAll(codecMap.values()) + ); + + // create destination index with destination codec + createIndex( + destIndex, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", destCodec) + .build() + ); + ensureGreen(destIndex); + + // perform reindex + BulkByScrollResponse response = reindex().source(index) + .destination(destIndex) + .refresh(true) + .waitForActiveShards(ActiveShardCount.ONE) + .get(); + final int expectedResponseSize = codecMap.size() * nbDocs; + + // assertions + assertEquals(0, response.getNoops()); + assertEquals(1, response.getBatches()); + assertEquals(0, response.getDeleted()); + assertEquals(0, response.getVersionConflicts()); + assertEquals(0, response.getBulkFailures().size()); + assertEquals(0, response.getSearchFailures().size()); + + assertEquals(expectedResponseSize, response.getTotal()); + assertEquals(expectedResponseSize, response.getCreated()); + + assertTrue(response.getTook().getMillis() > 0); + assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode)));
+ } + } + + private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { + assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); + + assertAcked( + client().admin() + .indices() + .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) + .get() + ); + + assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); + } + + private void ingestDocs(String index, int nbDocs) throws InterruptedException { + indexRandom( + randomBoolean(), + false, + randomBoolean(), + IntStream.range(0, nbDocs) + .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) + .collect(toList()) + ); + + flushAndRefresh(index); + } + + private ArrayList getSegments(String index) { + return new ArrayList<>( + client().admin() + .indices() + .segments(new IndicesSegmentsRequest(index)) + .actionGet() + .getIndices() + .get(index) + .getShards() + .get(0) + .getShards()[0].getSegments() + ); + } +} diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 deleted file mode 100644 index f9bdefc6dd965..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index d53adfa649f5f..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 deleted file mode 100644 index 258f7c957dda0..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 deleted file mode 100644 index b8bc0a4370f58..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 deleted file mode 100644 index 247975e0a64c7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6b7b66ea768e3..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index a83330356e35e..6475a0b744c60 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -40,6 +40,7 @@ import org.opensearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelPipeline; @@ -98,6 +99,22 @@ public Channel getNettyChannel() { return channel; } + @SuppressWarnings("unchecked") + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + Object handler = getNettyChannel().pipeline().get(name); + + if (handler == null && inboundPipeline() != null) { + handler = inboundPipeline().get(name); + } + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + @Override public String toString() { return "Netty4HttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 1677f333a4b1c..4970c42163ac3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -116,6 +116,9 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +/** + * The HTTP transport implementation based on Netty 4.
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 1677f333a4b1c..4970c42163ac3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -116,6 +116,9 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +/** + * The HTTP transport implementation based on Netty 4. + */ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class); @@ -184,6 +187,17 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private volatile ServerBootstrap serverBootstrap; private volatile SharedGroupFactory.SharedGroup sharedGroup; + /** + * Creates a new HTTP transport implementation based on Netty 4 + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + */ public Netty4HttpServerTransport( Settings settings, NetworkService networkService, diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java index 5db1f7c333157..79a5bf9e95121 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java @@ -41,6 +41,7 @@ import org.opensearch.transport.TransportException; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -164,6 +165,18 @@ public void sendMessage(BytesReference reference, ActionListener<Void> listener) } } + @SuppressWarnings("unchecked") + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + final Object handler = getNettyChannel().pipeline().get(name); + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + public Channel getNettyChannel() { return channel; } diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1
b/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ 
+12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt b/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java index ddfb99e626718..a14215aa7655b 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java @@ -10,13 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.shiro.SecurityUtils; import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; import org.opensearch.common.Randomness; import org.opensearch.identity.IdentityService; import org.opensearch.identity.Subject; -import org.opensearch.identity.noop.NoopSubject; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.BasicAuthToken; import org.opensearch.identity.tokens.OnBehalfOfClaims; @@ -88,20 +86,6 @@ public AuthToken issueServiceAccountToken(String audience) { return token; } - @Override - public Subject authenticateToken(AuthToken authToken) { - return new NoopSubject(); - } - - public boolean validateToken(AuthToken token) { - if (token instanceof BasicAuthToken) { - final BasicAuthToken basicAuthToken = (BasicAuthToken) token; - return basicAuthToken.getUser().equals(SecurityUtils.getSubject().toString()) - && basicAuthToken.getPassword().equals(shiroTokenPasswordMap.get(basicAuthToken)); - } - return false; - } - public String getTokenInfo(AuthToken token) { if (token instanceof BasicAuthToken) { final BasicAuthToken basicAuthToken = (BasicAuthToken) token; diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java index db77ced298991..f99484083e2fb 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java @@ -100,11 +100,6 @@ public void testShouldFailGetTokenInfo() { assertThrows(UnsupportedAuthenticationToken.class, () -> shiroAuthTokenHandler.getTokenInfo(bearerAuthToken)); } - public void testShouldFailValidateToken() { - final BearerAuthToken bearerAuthToken = new BearerAuthToken("header.payload.signature"); - assertFalse(shiroAuthTokenHandler.validateToken(bearerAuthToken)); - } - public void testShoudPassMapLookupWithToken() { final BasicAuthToken authToken = new BasicAuthToken("Basic dGVzdDp0ZTpzdA=="); shiroAuthTokenHandler.getShiroTokenPasswordMap().put(authToken, "te:st"); diff --git a/plugins/ingest-attachment/build.gradle 
b/plugins/ingest-attachment/build.gradle index 330a17c02bc7a..8945c09fca28b 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.14.0' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection diff --git a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 new file mode 100644 index 0000000000000..33c5cfe53e01d --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 @@ -0,0 +1 @@ +a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 26e2b4813b8a5..51f2057b4bedb 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,11 +56,8 @@ dependencies { api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.23.0' - api 'org.reactivestreams:reactive-streams:1.0.4' - api 'io.projectreactor:reactor-core:3.5.6' - api 'io.projectreactor.netty:reactor-netty:1.1.8' - api 'io.projectreactor.netty:reactor-netty-core:1.1.8' - api 'io.projectreactor.netty:reactor-netty-http:1.1.9' + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" @@ -101,10 +98,6 @@ thirdPartyAudit { 'com.azure.storage.internal.avro.implementation.schema.AvroSchema', 'com.ctc.wstx.shaded.msv_core.driver.textui.Driver', 'io.micrometer.context.ContextAccessor', - 'io.micrometer.context.ContextRegistry', - 'io.micrometer.context.ContextSnapshot', - 'io.micrometer.context.ContextSnapshot$Scope', - 'io.micrometer.core.instrument.Clock', 'io.micrometer.core.instrument.Counter', 'io.micrometer.core.instrument.Counter$Builder', 'io.micrometer.core.instrument.DistributionSummary', @@ -114,14 +107,10 @@ thirdPartyAudit { 'io.micrometer.core.instrument.Meter', 'io.micrometer.core.instrument.MeterRegistry', 'io.micrometer.core.instrument.Metrics', - 'io.micrometer.core.instrument.Tag', - 'io.micrometer.core.instrument.Tags', 'io.micrometer.core.instrument.Timer', 'io.micrometer.core.instrument.Timer$Builder', 'io.micrometer.core.instrument.Timer$Sample', - 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', - 'io.micrometer.core.instrument.search.Search', 'io.netty.channel.epoll.Epoll', 'io.netty.channel.epoll.EpollDatagramChannel', 'io.netty.channel.epoll.EpollServerSocketChannel', @@ -168,9 +157,6 @@ thirdPartyAudit { 'org.slf4j.impl.StaticLoggerBinder', 'org.slf4j.impl.StaticMDCBinder', 
'org.slf4j.impl.StaticMarkerBinder', - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration', - 'io.micrometer.context.ThreadLocalAccessor', 'io.micrometer.common.KeyValue', 'io.micrometer.common.KeyValues', 'io.micrometer.common.docs.KeyName', @@ -190,6 +176,7 @@ thirdPartyAudit { 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', 'io.micrometer.tracing.propagation.Propagator', 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.core.instrument.Tags', 'io.micrometer.observation.ObservationHandler', 'io.micrometer.observation.ObservationRegistry', 'io.micrometer.observation.ObservationRegistry$ObservationConfig', @@ -210,8 +197,7 @@ thirdPartyAudit { 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' ) } diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 deleted file mode 100644 index 18c388b84f333..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7e9038dee5c1adb1ebd07d3669e0e1182ac5b60 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..f0d165ff7cf82 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 @@ -0,0 +1 @@ 
+f3cdb002e0f2f30ad9c5fd053d78b1a485511ab1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 deleted file mode 100644 index 6aa4f9b99c274..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30d16ec2aef6d8094c5e2dce1d95034ca8b6cb42 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..40379694f5ea5 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 @@ -0,0 +1 @@ +77e3a27823f795d928b897d8444744ddb044a5c3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 deleted file mode 100644 index 80da08928f855..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a22fd1c7b0f9788e81eea32c11dc8c1ba421f18 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..820d14b3df8e4 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +684daae9ea45087c670b4f6511edcfdb19c3a695 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..0789dea62778a --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +24d0f4f2b0b0b55acc498d62f9c0921d0ea6da7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6c1112ed49775..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21c76a42a468faafac6c84f8aca775073fc8e345 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 deleted file mode 100644 index f9bdefc6dd965..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fb2d1aa25a5ec --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +cbaac9e2c2d7b86e4c1bda220cde49949c9bcaee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 deleted file mode 100644 index 717703c36e1ab..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -259bf1c5178c3e23bb89a2fab59b6d22846e3fa6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..ca030677c32d0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c198ee61abba7ae6b3cb6b1033f04fb0ae79d512 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 deleted file mode 100644 index 3f69ae54c5d4a..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c8a89ea89b06e120c57bdb3db14b9a47ca30bb3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..450c34d975bb9 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d3f503d7554cf25edc6b83b9b4724b240c6e91d5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 deleted file mode 100644 index adef44a4e7da7..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -804d8b752847923d3bb81f24de604597047c9b2e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6b7b66ea768e3..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt 
b/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 deleted file mode 100644 index ad9b7263e7b38..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -027fdc551537b349389176a23a192f11a7a3d7de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 deleted file mode 100644 index 6b6bf1903b16c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d53a9d7d0395285f4c81664494fcd61477626e32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..352d69396d0c9 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 @@ -0,0 +1 @@ +378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 deleted file mode 100644 index 707631f4dfe0c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48999c4ae27cdcee5eaff9dfd150a8b64624f0f5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..1bcb0e0c52950 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 @@ -0,0 +1 @@ +e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 deleted file mode 100644 index 96deead2c75d1..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-408b3037133f2e8ab0f195ccd3f807026be9b860 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index da4978608a12f..1bef5146f1db9 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -75,8 +75,8 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.2' - api 'com.google.http-client:google-http-client-appengine:1.43.2' + api 'com.google.http-client:google-http-client:1.43.3' + api 'com.google.http-client:google-http-client-appengine:1.43.3' api 'com.google.http-client:google-http-client-gson:1.43.3' api 'com.google.http-client:google-http-client-jackson2:1.43.3' diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 deleted file mode 100644 index a576a74c62542..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2520469ebd8c0675f0d2aeafd2da665228320fcf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..800467de8bdf3 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 @@ -0,0 +1 @@ +a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 deleted file mode 100644 index d8a9dba20070b..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fb548c5264227813fd83991b94a705b0841c15f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..4adcca6a55902 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 @@ -0,0 +1 @@ +09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 445e1d65f3d3e..c9ebb3acaf3e5 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java 
@@ -228,7 +228,7 @@ StorageOptions createStorageOptions( } storageOptionsBuilder.setCredentials(serviceAccountCredentials); } - return storageOptionsBuilder.build(); + return SocketAccess.doPrivilegedException(() -> storageOptionsBuilder.build()); } /** diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 197e772df30d5..35127d6ea4060 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.gcs; +import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.SpecialPermission; import org.opensearch.common.CheckedRunnable; @@ -71,4 +72,16 @@ public static void doPrivilegedVoidIOException(CheckedRunnable<IOException> acti throw (IOException) e.getCause(); } } + + public static <T> T doPrivilegedException(PrivilegedExceptionAction<T> operation) { + SpecialPermission.check(); + try { + return AccessController.doPrivileged(operation); + } catch (PrivilegedActionException e) { + Throwables.rethrow(e.getCause()); + assert false : "always throws"; + return null; + } + } + } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 1ce29afe0e69a..ed1f54888a26f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -84,7 +84,7 @@ dependencies { api 'net.minidev:json-smart:2.5.0' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - implementation 'org.codehaus.woodstox:stax2-api:4.2.1' + implementation 'org.codehaus.woodstox:stax2-api:4.2.2' hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 deleted file mode 100644 index 800a4aa87ba0e..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ab4f082fd162f60afcaf2b8744a3d959feab3e8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..0e22f98daa61c --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 @@ -0,0 +1 @@ +911fdb5b1a1df36719c579ecc6f2957b88bce1ab \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..093ced04bee4b --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +2761d380bc97e6ce64745dd9ca04538cedd40a5b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 deleted file mode 100644 index 0756635018837..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a45aa70bc50d0500da5cdcd595cc838d87ada987 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1
b/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 44fd45b265e82..560d12d14395d 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -70,7 +70,6 @@ dependencies { api "software.amazon.awssdk:sts:${versions.aws}" api "software.amazon.awssdk:netty-nio-client:${versions.aws}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 deleted file mode 100644 index f9bdefc6dd965..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 
d53adfa649f5f..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 deleted file mode 100644 index 258f7c957dda0..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 deleted file mode 100644 index b8bc0a4370f58..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 deleted file mode 100644 index 247975e0a64c7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..a710d964c1803 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +f86303830ab65680e45cabb531e37078ecc6791e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 deleted file mode 100644 index 75b64ad4197d8..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -961bd5b8d97ea6a07168176462f398089a24b5c8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file 
mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6b7b66ea768e3..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt b/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt b/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 5f88ad7867513..da2c6e8c1b0ee 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -37,6 +37,8 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -51,10 +53,15 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; +import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; @@ -63,12 +70,18 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.stream.StreamSupport; import fixture.s3.S3HttpHandler; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -152,6 +165,67 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10735") + @Override + public void testRequestStats() throws Exception { + final String repository = createRepository(randomName()); + final String index = "index-no-merges"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + final long nbDocs = randomLongBetween(10_000L, 20_000L); + try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) { + waitForDocs(nbDocs, indexer); + } + + flushAndRefresh(index); + ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); 
+ assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + final String snapshot = "snapshot"; + assertSuccessfulSnapshot( + client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); + + assertAcked(client().admin().indices().prepareDelete(index)); + + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + ensureGreen(index); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get()); + + final RepositoryStats repositoryStats = StreamSupport.stream( + internalCluster().getInstances(RepositoriesService.class).spliterator(), + false + ).map(repositoriesService -> { + try { + return repositoriesService.repository(repository); + } catch (RepositoryMissingException e) { + return null; + } + }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get(); + + Map<BlobStore.Metric, Map<String, Long>> extendedStats = repositoryStats.extendedStats; + Map<String, Long> aggregatedStats = new HashMap<>(); + extendedStats.forEach((k, v) -> { + if (k == BlobStore.Metric.RETRY_COUNT || k == BlobStore.Metric.REQUEST_SUCCESS || k == BlobStore.Metric.REQUEST_FAILURE) { + for (Map.Entry<String, Long> entry : v.entrySet()) { + aggregatedStats.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + } + + }); + final Map<String, Long> mockCalls = getMockRequestCounts(); + + String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", aggregatedStats, mockCalls); + + assertEquals(assertionErrorMsg, mockCalls, aggregatedStats); + } +
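For reference, a self-contained sketch (not part of the test) of the aggregation step above, with hypothetical request counts standing in for the extendedStats values; each per-metric map is folded into a single per-request-type total, and Math::addExact fails loudly on long overflow instead of wrapping:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class StatsAggregationSketch {
        public static void main(String[] args) {
            // Hypothetical per-metric request counts, mimicking two extendedStats entries.
            Map<String, Long> successes = Map.of("GetObject", 10L, "PutObject", 4L);
            Map<String, Long> retries = Map.of("GetObject", 2L);

            Map<String, Long> aggregated = new HashMap<>();
            for (Map<String, Long> partial : List.of(successes, retries)) {
                partial.forEach((request, count) -> aggregated.merge(request, count, Math::addExact));
            }
            System.out.println(aggregated); // e.g. {PutObject=4, GetObject=12}; iteration order may vary
        }
    }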
/** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -175,7 +249,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override public BlobStore blobStore() { @@ -263,6 +337,8 @@ public void maybeTrack(final String request, Headers requestHeaders) { trackRequest("PutMultipartObject"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutObject"); + } else if (Regex.simpleMatch("POST /*?delete*", request)) { + trackRequest("DeleteObjects"); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java index 0b5fcb6df280e..45170ea1ad209 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java @@ -29,6 +29,7 @@ public class AmazonAsyncS3Reference extends RefCountedReleasable<AmazonAsyncS3WithCredentials> { client.client().close(); client.priorityClient().close(); + client.urgentClient().close(); AwsCredentialsProvider credentials = client.credentials(); if (credentials instanceof Closeable) { try { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java index fa2db83729d25..f8a313b55d945 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java @@ -19,16 +19,19 @@ final class AmazonAsyncS3WithCredentials { private final S3AsyncClient client; private final S3AsyncClient priorityClient; + private final S3AsyncClient urgentClient; private final AwsCredentialsProvider credentials; private AmazonAsyncS3WithCredentials( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { this.client = client; this.credentials = credentials; this.priorityClient = priorityClient; + this.urgentClient = urgentClient; } S3AsyncClient client() { @@ -39,6 +42,10 @@ S3AsyncClient priorityClient() { return priorityClient; } + S3AsyncClient urgentClient() { + return urgentClient; + } + AwsCredentialsProvider credentials() { return credentials; } @@ -46,8 +53,9 @@ AwsCredentialsProvider credentials() { static AmazonAsyncS3WithCredentials create( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { - return new AmazonAsyncS3WithCredentials(client, priorityClient, credentials); + return new AmazonAsyncS3WithCredentials(client, priorityClient, urgentClient, credentials); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 08215ebdd45e0..262304029a0d3 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++
b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -103,6 +103,7 @@ public synchronized void refreshAndClearCache(Map clie */ public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -119,7 +120,7 @@ public AmazonAsyncS3Reference client( return existing; } final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( - buildClient(clientSettings, priorityExecutorBuilder, normalExecutorBuilder) + buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); clientReference.incRef(); clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); @@ -165,6 +166,7 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { // proxy for testing synchronized AmazonAsyncS3WithCredentials buildClient( final S3ClientSettings clientSettings, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -195,6 +197,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( builder.forcePathStyle(true); } + builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.asyncConfiguration( + ClientAsyncConfiguration.builder() + .advancedOption( + SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, + urgentExecutorBuilder.getFutureCompletionExecutor() + ) + .build() + ); + final S3AsyncClient urgentClient = SocketAccess.doPrivileged(builder::build); + builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup())); builder.asyncConfiguration( ClientAsyncConfiguration.builder() @@ -217,7 +230,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient client = SocketAccess.doPrivileged(builder::build); - return AmazonAsyncS3WithCredentials.create(client, priorityClient, credentials); + return AmazonAsyncS3WithCredentials.create(client, priorityClient, urgentClient, credentials); } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 9777bd974d56c..c1180aab0e0c7 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -195,11 +195,16 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { - S3AsyncClient s3AsyncClient = writeContext.getWritePriority() == WritePriority.HIGH - ? 
amazonS3Reference.get().priorityClient() - : amazonS3Reference.get().client(); + S3AsyncClient s3AsyncClient; + if (writeContext.getWritePriority() == WritePriority.URGENT) { + s3AsyncClient = amazonS3Reference.get().urgentClient(); + } else if (writeContext.getWritePriority() == WritePriority.HIGH) { + s3AsyncClient = amazonS3Reference.get().priorityClient(); + } else { + s3AsyncClient = amazonS3Reference.get().client(); + } CompletableFuture<Void> completableFuture = blobStore.getAsyncTransferManager() - .uploadObject(s3AsyncClient, uploadRequest, streamContext); + .uploadObject(s3AsyncClient, uploadRequest, streamContext, blobStore.getStatsMetricPublisher()); completableFuture.whenComplete((response, throwable) -> { if (throwable == null) { completionListener.onResponse(response); @@ -384,7 +389,7 @@ private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOEx assert outstanding.isEmpty(); } - private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) { + private DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) { return DeleteObjectsRequest.builder() .bucket(bucket) .delete( @@ -393,6 +398,7 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs .quiet(true) .build() ) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) .build(); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 80005d92344a4..e8e043357e126 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -47,6 +47,8 @@ import org.opensearch.repositories.s3.async.AsyncTransferManager; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -82,6 +84,7 @@ class S3BlobStore implements BlobStore { private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher(); private final AsyncTransferManager asyncTransferManager; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final boolean multipartUploadEnabled; @@ -98,6 +101,7 @@ class S3BlobStore implements BlobStore { int bulkDeletesSize, RepositoryMetadata repositoryMetadata, AsyncTransferManager asyncTransferManager, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -114,6 +118,7 @@ class S3BlobStore implements BlobStore { this.asyncTransferManager = asyncTransferManager; this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; + this.urgentExecutorBuilder = urgentExecutorBuilder; } @Override @@ -137,7 +142,7 @@ public AmazonS3Reference clientReference() { } public AmazonAsyncS3Reference asyncClientReference() { - return s3AsyncService.client(repositoryMetadata, priorityExecutorBuilder, normalExecutorBuilder); + return s3AsyncService.client(repositoryMetadata, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder); } int getMaxRetries() { @@ -180,6 +185,16 @@ public Map<String, Long> stats() { return statsMetricPublisher.getStats().toMap(); } + @Override + public Map<Metric, Map<String, Long>> extendedStats() { + if 
(statsMetricPublisher.getExtendedStats() == null || statsMetricPublisher.getExtendedStats().isEmpty()) { + return Collections.emptyMap(); + } + Map> extendedStats = new HashMap<>(); + statsMetricPublisher.getExtendedStats().forEach((k, v) -> extendedStats.put(k, v.toMap())); + return extendedStats; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index aaf5b79891cdc..728a99b1220a6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -234,6 +234,7 @@ class S3Repository extends MeteredBlobStoreRepository { private final AsyncTransferManager asyncUploadUtils; private final S3AsyncService s3AsyncService; private final boolean multipartUploadEnabled; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final Path pluginConfigPath; @@ -248,6 +249,7 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, @@ -260,6 +262,7 @@ class S3Repository extends MeteredBlobStoreRepository { clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, @@ -278,6 +281,7 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, @@ -290,6 +294,7 @@ class S3Repository extends MeteredBlobStoreRepository { this.multipartUploadEnabled = multipartUploadEnabled; this.pluginConfigPath = pluginConfigPath; this.asyncUploadUtils = asyncUploadUtils; + this.urgentExecutorBuilder = urgentExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.normalExecutorBuilder = normalExecutorBuilder; @@ -352,6 +357,7 @@ protected S3BlobStore createBlobStore() { bulkDeletesSize, metadata, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder ); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index a80ee0ca35fae..9ed232464d080 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -55,6 +56,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.FixedExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -73,6 +75,9 @@ * A plugin to add a repository type that writes to and from the AWS S3. */ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + + private static final String URGENT_FUTURE_COMPLETION = "urgent_future_completion"; + private static final String URGENT_STREAM_READER = "urgent_stream_reader"; private static final String PRIORITY_FUTURE_COMPLETION = "priority_future_completion"; private static final String PRIORITY_STREAM_READER = "priority_stream_reader"; private static final String FUTURE_COMPLETION = "future_completion"; @@ -83,6 +88,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private final Path configPath; + private AsyncExecutorContainer urgentExecutorBuilder; private AsyncExecutorContainer priorityExecutorBuilder; private AsyncExecutorContainer normalExecutorBuilder; @@ -93,17 +99,25 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { @Override public List> getExecutorBuilders(Settings settings) { List> executorBuilders = new ArrayList<>(); + int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors(settings)); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) + new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION) ); + executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_STREAM_READER, priorityPoolCount(settings), 10_000, PRIORITY_STREAM_READER) + new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) ); + executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION)); - executorBuilders.add(new FixedExecutorBuilder(settings, STREAM_READER, normalPoolCount(settings), 10_000, STREAM_READER)); + executorBuilders.add(new ScalingExecutorBuilder(STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); return executorBuilders; } + static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { + return boundedBy((allocatedProcessors + 1) / 2, 1, 5); + } + S3RepositoryPlugin(final Settings settings, final Path configPath, final S3Service service, final S3AsyncService s3AsyncService) { this.service = Objects.requireNonNull(service, "S3 service must not be null"); this.configPath = configPath; @@ -122,6 +136,10 @@ private static int allocatedProcessors(Settings settings) { return OpenSearchExecutors.allocatedProcessors(settings); } + private static int urgentPoolCount(Settings settings) { + return boundedBy((allocatedProcessors(settings) + 7) / 8, 1, 2); + } + private static int priorityPoolCount(Settings settings) { return 
boundedBy((allocatedProcessors(settings) + 1) / 2, 2, 4); } @@ -144,8 +162,14 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier ) { + int urgentEventLoopThreads = urgentPoolCount(clusterService.getSettings()); int priorityEventLoopThreads = priorityPoolCount(clusterService.getSettings()); int normalEventLoopThreads = normalPoolCount(clusterService.getSettings()); + this.urgentExecutorBuilder = new AsyncExecutorContainer( + threadPool.executor(URGENT_FUTURE_COMPLETION), + threadPool.executor(URGENT_STREAM_READER), + new AsyncTransferEventLoopGroup(urgentEventLoopThreads) + ); this.priorityExecutorBuilder = new AsyncExecutorContainer( threadPool.executor(PRIORITY_FUTURE_COMPLETION), threadPool.executor(PRIORITY_STREAM_READER), @@ -170,7 +194,8 @@ protected S3Repository createRepository( AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), normalExecutorBuilder.getStreamReader(), - priorityExecutorBuilder.getStreamReader() + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader() ); return new S3Repository( metadata, @@ -179,6 +204,7 @@ protected S3Repository createRepository( clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index cad0037f99249..0c63bfdb1ff97 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -8,10 +8,13 @@ package org.opensearch.repositories.s3; -import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.MetricRecord; +import org.opensearch.common.blobstore.BlobStore; + +import java.time.Duration; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @@ -20,18 +23,67 @@ public class StatsMetricPublisher { private final Stats stats = new Stats(); + private final Map extendedStats = new HashMap<>() { + { + put(BlobStore.Metric.REQUEST_LATENCY, new Stats()); + put(BlobStore.Metric.REQUEST_SUCCESS, new Stats()); + put(BlobStore.Metric.REQUEST_FAILURE, new Stats()); + put(BlobStore.Metric.RETRY_COUNT, new Stats()); + } + }; + public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.listCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).listMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).listMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if 
((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).listMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).listMetrics.addAndGet(1); + } + stats.listMetrics.addAndGet(1); + break; + } + } + } + + @Override + public void close() {} + }; + + public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) { + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).deleteMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).deleteMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).deleteMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).deleteMetrics.addAndGet(1); + } + stats.deleteMetrics.addAndGet(1); + break; + } + } } @Override @@ -41,15 +93,26 @@ public void close() {} public MetricPublisher getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.getCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).getMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).getMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).getMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).getMetrics.addAndGet(1); + } + stats.getMetrics.addAndGet(1); + break; + } + } } @Override @@ -59,15 +122,26 @@ public void close() {} public MetricPublisher putObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.putCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).putMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).putMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).putMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).putMetrics.addAndGet(1); + } + stats.putMetrics.addAndGet(1); + break; + } + } } @Override @@ -77,15 +151,26 @@ public void close() {} public MetricPublisher multipartUploadMetricCollector = new MetricPublisher() { @Override 
public void publish(MetricCollection metricCollection) { - stats.postCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).multiPartPutMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).multiPartPutMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).multiPartPutMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).multiPartPutMetrics.addAndGet(1); + } + stats.multiPartPutMetrics.addAndGet(1); + break; + } + } } @Override @@ -96,22 +181,29 @@ public Stats getStats() { return stats; } + public Map getExtendedStats() { + return extendedStats; + } + static class Stats { - final AtomicLong listCount = new AtomicLong(); + final AtomicLong listMetrics = new AtomicLong(); + + final AtomicLong getMetrics = new AtomicLong(); - final AtomicLong getCount = new AtomicLong(); + final AtomicLong putMetrics = new AtomicLong(); - final AtomicLong putCount = new AtomicLong(); + final AtomicLong deleteMetrics = new AtomicLong(); - final AtomicLong postCount = new AtomicLong(); + final AtomicLong multiPartPutMetrics = new AtomicLong(); Map toMap() { final Map results = new HashMap<>(); - results.put("GetObject", getCount.get()); - results.put("ListObjects", listCount.get()); - results.put("PutObject", putCount.get()); - results.put("PutMultipartObject", postCount.get()); + results.put("GetObject", getMetrics.get()); + results.put("ListObjects", listMetrics.get()); + results.put("PutObject", putMetrics.get()); + results.put("DeleteObjects", deleteMetrics.get()); + results.put("PutMultipartObject", multiPartPutMetrics.get()); return results; } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index ad6939ce299d6..933ee6dc29513 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -23,10 +23,13 @@ import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; import org.opensearch.repositories.s3.io.CheckedContainer; +import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -45,6 +48,7 @@ public class AsyncPartsHandler { * @param s3AsyncClient S3 client to use for upload * @param executorService Thread pool for regular upload * @param priorityExecutorService Thread pool for priority uploads + * @param urgentExecutorService Thread pool for urgent uploads * @param uploadRequest request for upload * @param streamContext Stream context used in supplying 
individual file parts * @param uploadId Upload Id against which multi-part is being performed @@ -57,6 +61,7 @@ public static List> uploadParts( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, UploadRequest uploadRequest, StreamContext streamContext, String uploadId, @@ -80,6 +85,7 @@ public static List> uploadParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, completedParts, inputStreamContainers, futures, @@ -126,6 +132,7 @@ private static void uploadPart( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, AtomicReferenceArray completedParts, AtomicReferenceArray inputStreamContainers, List> futures, @@ -135,29 +142,47 @@ private static void uploadPart( ) { Integer partNumber = uploadPartRequest.partNumber(); - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); CompletableFuture uploadPartResponseFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.uploadPart( uploadPartRequest, - AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ) ); - CompletableFuture convertFuture = uploadPartResponseFuture.thenApply( - uploadPartResponse -> convertUploadPartResponse( - completedParts, - inputStreamContainers, - uploadPartResponse, - partNumber, - uploadRequest.doRemoteDataIntegrityCheck() - ) - ); + CompletableFuture convertFuture = uploadPartResponseFuture.whenComplete((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException ex) { + log.error( + () -> new ParameterizedMessage( + "Failed to close stream while uploading a part of idx {} and file {}.", + uploadPartRequest.partNumber(), + uploadPartRequest.key() + ), + ex + ); + } + }) + .thenApply( + uploadPartResponse -> convertUploadPartResponse( + completedParts, + inputStreamContainers, + uploadPartResponse, + partNumber, + uploadRequest.doRemoteDataIntegrityCheck() + ) + ); futures.add(convertFuture); CompletableFutureUtils.forwardExceptionTo(convertFuture, uploadPartResponseFuture); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index 8d45c2167a3d1..4f1ab9764702e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -35,9 +35,12 @@ import 
org.opensearch.common.util.ByteUtils; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.repositories.s3.io.CheckedContainer; +import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import java.util.Base64; import java.util.List; @@ -58,6 +61,7 @@ public final class AsyncTransferManager { private static final Logger log = LogManager.getLogger(AsyncTransferManager.class); private final ExecutorService executorService; private final ExecutorService priorityExecutorService; + private final ExecutorService urgentExecutorService; private final long minimumPartSize; /** @@ -72,10 +76,16 @@ public final class AsyncTransferManager { * @param executorService The stream reader {@link ExecutorService} for normal priority uploads * @param priorityExecutorService The stream read {@link ExecutorService} for high priority uploads */ - public AsyncTransferManager(long minimumPartSize, ExecutorService executorService, ExecutorService priorityExecutorService) { + public AsyncTransferManager( + long minimumPartSize, + ExecutorService executorService, + ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService + ) { this.executorService = executorService; this.priorityExecutorService = priorityExecutorService; this.minimumPartSize = minimumPartSize; + this.urgentExecutorService = urgentExecutorService; } /** @@ -86,16 +96,21 @@ public AsyncTransferManager(long minimumPartSize, ExecutorService executorServic * @param streamContext The {@link StreamContext} to supply streams during upload * @return A {@link CompletableFuture} to listen for upload completion */ - public CompletableFuture<Void> uploadObject(S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext) { + public CompletableFuture<Void> uploadObject( + S3AsyncClient s3AsyncClient, + UploadRequest uploadRequest, + StreamContext streamContext, + StatsMetricPublisher statsMetricPublisher + ) { CompletableFuture<Void> returnFuture = new CompletableFuture<>(); try { if (streamContext.getNumberOfParts() == 1) { log.debug(() -> "Starting the upload as a single upload part request"); - uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture); + uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture, statsMetricPublisher); } else { log.debug(() -> "Starting the upload as multipart upload request"); - uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture); + uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, statsMetricPublisher); } } catch (Throwable throwable) { returnFuture.completeExceptionally(throwable); @@ -108,12 +123,14 @@ private void uploadInParts( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext, - CompletableFuture<Void> returnFuture + CompletableFuture<Void> returnFuture, + StatsMetricPublisher statsMetricPublisher ) { CreateMultipartUploadRequest.Builder createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder() .bucket(uploadRequest.getBucket()) - .key(uploadRequest.getKey()); + .key(uploadRequest.getKey()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)); if (uploadRequest.doRemoteDataIntegrityCheck()) { createMultipartUploadRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); } @@ -152,6 +169,7 @@ 
private void doUploadInParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, uploadRequest, streamContext, uploadId, @@ -286,28 +304,42 @@ private void uploadInOneChunk( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, InputStreamContainer inputStreamContainer, - CompletableFuture returnFuture + CompletableFuture returnFuture, + StatsMetricPublisher statsMetricPublisher ) { PutObjectRequest.Builder putObjectRequestBuilder = PutObjectRequest.builder() .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) - .contentLength(uploadRequest.getContentLength()); + .contentLength(uploadRequest.getContentLength()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.putObjectMetricPublisher)); if (uploadRequest.doRemoteDataIntegrityCheck()) { putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum())); } - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); CompletableFuture putObjectFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), - AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ).handle((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException e) { + log.error( + () -> new ParameterizedMessage("Failed to close stream while uploading single file {}.", uploadRequest.getKey()), + e + ); + } if (throwable != null) { Throwable unwrappedThrowable = ExceptionsHelper.unwrap(throwable, S3Exception.class); if (unwrappedThrowable != null) { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index a4bfe11383b4f..8e1926d40302f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -302,7 +302,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git 
a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java index e9fe557ab751a..de9ad46bb222d 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java @@ -44,12 +44,12 @@ public void testCachedClientsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); @@ -79,12 +79,12 @@ public void testCachedClientsWithCredentialsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 6eb8faa746d34..7c67519f2f3b0 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -266,10 +266,11 @@ public void verifySingleChunkUploadCallCount(boolean finalizeUploadFailure) { @Override public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { - return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, null)); + return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, asyncClient, null)); } } @@ -393,9 +394,11 @@ private S3BlobStore createBlobStore() { new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), 
asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index a2214f5218991..ceab06bd051e9 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -221,9 +221,11 @@ protected AsyncMultiStreamBlobContainer createBlobContainer( new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ) ) { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 2701cae6a733b..58ad290a31e85 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -935,7 +935,7 @@ public void testReadBlobAsyncMultiPart() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); @@ -993,7 +993,7 @@ public void testReadBlobAsyncSinglePart() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -1048,7 +1048,7 @@ public void testReadBlobAsyncFailure() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); @@ -1091,7 +1091,7 @@ public void testReadBlobAsyncOnCompleteFailureMissingData() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); final S3BlobStore blobStore = mock(S3BlobStore.class); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index e65ca69a5047b..6fec535ae6301 100644 --- 
a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -168,6 +168,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { null, null, null, + null, false ) { @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 9c07b929052bc..2437547a80a6f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -33,12 +33,18 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.blobstore.ZeroInputStream; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -58,6 +64,7 @@ public void setUp() throws Exception { asyncTransferManager = new AsyncTransferManager( ByteSizeUnit.MB.toBytes(5), Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor() ); super.setUp(); @@ -70,17 +77,17 @@ public void testOneChunkUpload() { putObjectResponseCompletableFuture ); + AtomicReference streamRef = new AtomicReference<>(); CompletableFuture resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing }, false, null), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 1 - ) + new StreamContext((partIdx, partSize, position) -> { + streamRef.set(new ZeroInputStream(partSize)); + return new InputStreamContainer(streamRef.get(), partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1), + new StatsMetricPublisher() ); try { @@ -90,6 +97,14 @@ public void testOneChunkUpload() { } verify(s3AsyncClient, times(1)).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class)); + + boolean closeError = false; + try { + streamRef.get().available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); } public void testOneChunkUploadCorruption() { @@ -118,7 +133,8 @@ public void testOneChunkUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1 - ) + ), + new StatsMetricPublisher() ); try { @@ -159,17 +175,18 @@ public void testMultipartUpload() { abortMultipartUploadResponseCompletableFuture ); + List streams = new ArrayList<>(); CompletableFuture resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do 
nothing }, true, 3376132981L), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 5 - ) + new StreamContext((partIdx, partSize, position) -> { + InputStream stream = new ZeroInputStream(partSize); + streams.add(stream); + return new InputStreamContainer(stream, partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5), + new StatsMetricPublisher() ); try { @@ -178,6 +195,16 @@ public void testMultipartUpload() { fail("did not expect resultFuture to fail"); } + streams.forEach(stream -> { + boolean closeError = false; + try { + stream.available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); + }); + verify(s3AsyncClient, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); verify(s3AsyncClient, times(5)).uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class)); verify(s3AsyncClient, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -219,7 +246,8 @@ public void testMultipartUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5 - ) + ), + new StatsMetricPublisher() ); try { diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 04fff20947b4f..f5c367cb7643b 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -28,7 +28,7 @@ dependencies { api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" - api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" + api "io.opentelemetry.semconv:opentelemetry-semconv:${versions.opentelemetrysemconv}" api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}" diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 deleted file mode 100644 index b0ce00e191830..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a32dfbd7f01de6711fd0e970f8d4b4c0405056d6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..eae141a8d1a23 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 @@ -0,0 +1 @@ +bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 deleted file mode 100644 index 84cb60a2f7acb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f665ff01ce6b964cdf0b8cb5cd1c196dfe94ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 new file mode 100644 index 
0000000000000..6e42973adc581 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 deleted file mode 100644 index eccb15f7b7c8e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f299d336dba1039478497f37b273dfa764c6faef \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..b119468e7f88b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 deleted file mode 100644 index 40537a399ab14..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f1a09e89955e6145babf8bcdf80c95174eb817 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8f653922d6418 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 @@ -0,0 +1 @@ +260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 deleted file mode 100644 index e88b7514ee54d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -15692246539571c41180aff2b55abe527b939a7b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..103da4720de96 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 deleted file mode 100644 index 86937743208c6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -947cf43a6411c4a323e14594431040a476ad43e8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..3db07532ceea9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 deleted file mode 100644 index 068926277253c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f3a14515500e4df260ce7b10a668237a95ac791 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..10d9b7cdfe3e3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 deleted file mode 100644 index bde43937e82e4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bfcea9bd71f97dd4e8a4f92c15ba5659fb07ff05 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..162890965a6eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 @@ -0,0 +1 @@ +6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 deleted file mode 100644 index d425ed61cc4cd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d15a9ea26e8e6ea93287a9f4ee02d91e5a74392 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..d6ce31a31cc6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 @@ -0,0 +1 @@ +2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 deleted file mode 100644 index 6b32d98b0f7c7..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e437ba87004bb63069d04fb06beae65b98dd13a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8a6a9705d836d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 deleted file mode 100644 index 13ef6de11e82d..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5985d0950746ad12b49cc42c063f26ddfbcaaacb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..37d79f5c573f7 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 deleted file mode 100644 index fc5aad9c9011e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b12825541c5dae52a0fb35045c1b36df3ca8f632 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..80179e4808f50 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 @@ -0,0 +1 @@ +47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 deleted file mode 100644 index ac522b765da05..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c5531fbc44178a7bcfeb7021ae80e70a7c43458 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..fd917a58ba77c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..77b12c99464f6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 @@ -0,0 +1 @@ +207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 deleted file mode 100644 index 089a2484dd1d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e8f7a97a4896a81846553275b9d61885be7ef50 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java similarity index 85% rename from plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java rename to plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java index ed4d13f3abb7d..45caf8bf5f60b 100644 --- 
a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java @@ -6,12 +6,9 @@ * compatible open source license. */ -package org.opensearch.telemetry.tracing; +package org.opensearch.telemetry; import org.opensearch.common.settings.Settings; -import org.opensearch.telemetry.OTelTelemetryPlugin; -import org.opensearch.telemetry.Telemetry; -import org.opensearch.telemetry.TelemetrySettings; import java.util.Optional; diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java new file mode 100644 index 0000000000000..74fc872cb30e3 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.util.Collection; +import java.util.List; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricExporter; + +public class InMemorySingletonMetricsExporter implements MetricExporter { + + public static final InMemorySingletonMetricsExporter INSTANCE = new InMemorySingletonMetricsExporter(InMemoryMetricExporter.create()); + + private static InMemoryMetricExporter delegate; + + public static InMemorySingletonMetricsExporter create() { + return INSTANCE; + } + + private InMemorySingletonMetricsExporter(InMemoryMetricExporter delegate) { + InMemorySingletonMetricsExporter.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection<MetricData> metrics) { + return delegate.export(metrics); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + public List<MetricData> getFinishedMetricItems() { + return delegate.getFinishedMetricItems(); + } + + /** + * Clears the state. + */ + public void reset() { + delegate.reset(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } +}
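A note on the singleton shape of InMemorySingletonMetricsExporter above: the OTel SDK instantiates the exporter from the class name configured in OTEL_METRICS_EXPORTER_CLASS_SETTING, so the object the SDK exports into and the object the test inspects must share state. A minimal sketch of that interaction, assuming the class is resolved by name and its static create() factory is invoked (the sketch class name and the reflective call are illustrative, not the plugin's actual loading code):

    import java.util.List;
    import io.opentelemetry.sdk.metrics.data.MetricData;
    import io.opentelemetry.sdk.metrics.export.MetricExporter;
    import org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter;

    class SingletonExporterSketch {
        static List<MetricData> demo() throws Exception {
            // SDK side: the configured exporter class is loaded by name and obtained via create().
            MetricExporter sdkSide = (MetricExporter) Class.forName(
                "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter"
            ).getMethod("create").invoke(null);
            sdkSide.flush();

            // Test side: create() returned the shared INSTANCE; because the delegate is a
            // static InMemoryMetricExporter, both views read and write the same buffer.
            return InMemorySingletonMetricsExporter.INSTANCE.getFinishedMetricItems();
        }
    }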
diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java new file mode 100644 index 0000000000000..bcdcb657c4f42 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 1) +public class TelemetryMetricsDisabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityChecksWhenMetricsDisabled() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + + Thread.sleep(2000); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(counter instanceof NoopCounter); + } + +}
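The sanity ITs on either side of this point set METRICS_PUBLISH_INTERVAL_SETTING to one second and then sleep about two seconds, i.e. at least one full publish cycle. For context, a hedged sketch of how such an interval is typically wired in the OTel SDK; this assumes the plugin registers the exporter through a PeriodicMetricReader, which is not shown in this diff (the sketch class name is mine):

    import java.time.Duration;
    import io.opentelemetry.sdk.metrics.SdkMeterProvider;
    import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
    import org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter;

    class PublishIntervalSketch {
        // A 1s reader interval mirrors METRICS_PUBLISH_INTERVAL_SETTING in these tests;
        // sleeping ~2s in a test therefore spans at least one complete export.
        static SdkMeterProvider build() {
            return SdkMeterProvider.builder()
                .registerMetricReader(
                    PeriodicMetricReader.builder(InMemorySingletonMetricsExporter.create())
                        .setInterval(Duration.ofSeconds(1))
                        .build()
                )
                .build();
        }
    }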
diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java new file mode 100644 index 0000000000000..ed341595d327d --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; + +import io.opentelemetry.sdk.metrics.data.DoublePointData; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) +public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), true) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testCounter() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(1.0, value, 0.0); + } + + public void testUpDownCounter() throws Exception { + + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createUpDownCounter("test-up-down-counter", "test", "1"); + counter.add(1.0); + counter.add(-2.0); + // Sleep for about 2s to wait for metrics to be published.
+ Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-up-down-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(-1.0, value, 0.0); + } + + @After + public void reset() { + InMemorySingletonMetricsExporter.INSTANCE.reset(); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java index 949a58f6cab41..45ed140e1be94 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index 8a49a0abf5512..156dc344d1ae2 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.tracing.attributes.Attributes; @@ -60,20 +61,22 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { // Create Index and ingest data String indexName = "test-index-11"; - Settings basicSettings = Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0).build(); + Settings basicSettings = Settings.builder() + .put("number_of_shards", 2) + .put("number_of_replicas", 0) + .put("index.routing.allocation.total_shards_per_node", 1) + .build(); createIndex(indexName, basicSettings); - indexRandom(true, client.prepareIndex(indexName).setId("1").setSource("field1", "the fox jumps in the well")); - indexRandom(true, client.prepareIndex(indexName).setId("1").setSource("field2", "another fox did the same.")); + + indexRandom(false, client.prepareIndex(indexName).setId("1").setSource("field1", "the fox jumps in the well")); + indexRandom(false, client.prepareIndex(indexName).setId("2").setSource("field2", "another fox did the same.")); ensureGreen(); refresh(); // Make the search calls; adding the searchType and PreFilterShardSize to make the query path predictable across all the runs. 
- client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("fox")).get(); - client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("jumps")).get(); - - ensureGreen(); - refresh(); + client.prepareSearch().setSearchType("dfs_query_then_fetch").setPreFilterShardSize(2).setQuery(queryStringQuery("fox")).get(); + client.prepareSearch().setSearchType("dfs_query_then_fetch").setPreFilterShardSize(2).setQuery(queryStringQuery("jumps")).get(); // Sleep for about 3s to wait for traces to be published (the publish delay is 1s). Thread.sleep(3000); @@ -87,10 +90,10 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { ) ); + // See https://github.com/opensearch-project/OpenSearch/issues/10291: until the local transport is instrumented, + // we capture only the inter-node transport actions. InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; - if (!exporter.getFinishedSpanItems().isEmpty()) { - validators.validate(exporter.getFinishedSpanItems(), 6); - } + validators.validate(exporter.getFinishedSpanItems(), 4); } private static void updateTelemetrySetting(Client client, boolean value) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index b57876c9310f3..297ae8873636f 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -8,14 +8,13 @@ package org.opensearch.telemetry; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.TelemetryPlugin; -import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; import org.opensearch.telemetry.tracing.OTelResourceProvider; import org.opensearch.telemetry.tracing.OTelTelemetry; -import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import java.util.Arrays; import java.util.List; @@ -37,6 +36,8 @@ public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin { private final Settings settings; + private RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; + /** * Creates Otel plugin * @param settings cluster settings */ @@ -58,20 +59,32 @@ public List<Setting<?>> getSettings() { @Override public Optional<Telemetry> getTelemetry(TelemetrySettings telemetrySettings) { + initializeOpenTelemetrySdk(telemetrySettings); return Optional.of(telemetry(telemetrySettings)); } + private void initializeOpenTelemetrySdk(TelemetrySettings telemetrySettings) { + if (refCountedOpenTelemetry != null) { + return; + } + OpenTelemetrySdk openTelemetrySdk = OTelResourceProvider.get(telemetrySettings, settings); + refCountedOpenTelemetry = new RefCountedReleasable<>("openTelemetry", openTelemetrySdk, openTelemetrySdk::close); + } + @Override public String getName() { return OTEL_TRACER_NAME; } private Telemetry telemetry(TelemetrySettings telemetrySettings) { - final OpenTelemetrySdk openTelemetry = OTelResourceProvider.get(telemetrySettings, settings); - return new OTelTelemetry( - new OTelTracingTelemetry<>(openTelemetry, openTelemetry.getSdkTracerProvider()), - new OTelMetricsTelemetry<>(openTelemetry.getSdkMeterProvider()) - ); + return new OTelTelemetry(refCountedOpenTelemetry); + } + + @Override + public void close() { + if (refCountedOpenTelemetry != null) { + refCountedOpenTelemetry.close(); + } + } }
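The ref-counting above is the core of this change: the plugin creates the OpenTelemetrySdk lazily and exactly once, wraps it in a RefCountedReleasable, and hands the same wrapper to both telemetries; each consumer takes a reference when constructed and releases it on close, so sdk::close runs exactly once, after the last user is done. A sketch of the assumed lifecycle (the type and method names come from this diff; the sketch wrapper and the ref-count transitions are inferred from how RefCountedReleasable is used here):

    import java.io.IOException;
    import io.opentelemetry.sdk.OpenTelemetrySdk;
    import org.opensearch.common.concurrent.RefCountedReleasable;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.telemetry.TelemetrySettings;
    import org.opensearch.telemetry.metrics.MetricsTelemetry;
    import org.opensearch.telemetry.metrics.OTelMetricsTelemetry;
    import org.opensearch.telemetry.tracing.OTelResourceProvider;
    import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
    import org.opensearch.telemetry.tracing.TracingTelemetry;

    class SdkLifecycleSketch {
        static void demo(TelemetrySettings telemetrySettings, Settings settings) throws IOException {
            // Plugin initialization: the wrapper holds the initial reference.
            OpenTelemetrySdk sdk = OTelResourceProvider.get(telemetrySettings, settings);
            RefCountedReleasable<OpenTelemetrySdk> shared =
                new RefCountedReleasable<>("openTelemetry", sdk, sdk::close);                          // refs = 1

            TracingTelemetry tracing = new OTelTracingTelemetry<>(shared, sdk.getSdkTracerProvider()); // incRef, refs = 2
            MetricsTelemetry metrics = new OTelMetricsTelemetry<>(shared, sdk.getSdkMeterProvider());  // incRef, refs = 3

            tracing.close(); // refs = 2
            metrics.close(); // refs = 1
            shared.close();  // plugin close(): refs = 0, sdk::close() runs exactly once
        }
    }

This is why the new OTelTelemetryPlugin.close() only closes the wrapper: any telemetry still open keeps the SDK alive until its own close() releases the last reference.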
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java index 8598e5976d20d..6160e5106c041 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; @@ -19,19 +20,24 @@ import io.opentelemetry.api.metrics.DoubleUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * OTel implementation for {@link MetricsTelemetry} */ public class OTelMetricsTelemetry<T extends MeterProvider & Closeable> implements MetricsTelemetry { + private final RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; private final Meter otelMeter; private final T meterProvider; /** * Creates OTel based {@link MetricsTelemetry}. + * @param openTelemetry open telemetry. * @param meterProvider {@link MeterProvider} instance */ - public OTelMetricsTelemetry(T meterProvider) { + public OTelMetricsTelemetry(RefCountedReleasable<OpenTelemetrySdk> openTelemetry, T meterProvider) { + this.refCountedOpenTelemetry = openTelemetry; + this.refCountedOpenTelemetry.incRef(); this.meterProvider = meterProvider; this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @@ -63,5 +69,6 @@ public Counter createUpDownCounter(String name, String description, String unit) @Override public void close() throws IOException { meterProvider.close(); + refCountedOpenTelemetry.close(); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index a6a1f12aab8a9..14a19f122c17b 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -30,7 +30,7 @@ import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.sdk.trace.samplers.Sampler; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; +import io.opentelemetry.semconv.ResourceAttributes; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING;
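Note the intersection bound on OTelMetricsTelemetry above, T extends MeterProvider & Closeable (the tracing side uses the analogous TracerProvider & Closeable): T must both hand out meters and be closable in close(), which the SDK providers satisfy but the plain API interfaces do not. A small sketch of why the bound is needed (the sketch names are mine):

    import java.io.Closeable;
    import java.io.IOException;
    import io.opentelemetry.api.metrics.MeterProvider;

    class IntersectionBoundSketch {
        // Without "& Closeable" this would not compile: the MeterProvider API
        // interface has no close(); only SDK implementations are Closeable.
        static <T extends MeterProvider & Closeable> void shutdown(T provider) throws IOException {
            provider.close();
        }
    }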
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java index 282fabd43346b..0c697d2cc5e8c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java @@ -8,34 +8,35 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; + +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * Otel implementation of Telemetry */ public class OTelTelemetry implements Telemetry { - private final TracingTelemetry tracingTelemetry; - private final MetricsTelemetry metricsTelemetry; + private final RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; /** * Creates Telemetry instance - * @param tracingTelemetry tracing telemetry - * @param metricsTelemetry metrics telemetry + * @param refCountedOpenTelemetry open telemetry. */ - public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) { - this.tracingTelemetry = tracingTelemetry; - this.metricsTelemetry = metricsTelemetry; + public OTelTelemetry(RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; } @Override public TracingTelemetry getTracingTelemetry() { - return tracingTelemetry; + return new OTelTracingTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkTracerProvider()); } @Override public MetricsTelemetry getMetricsTelemetry() { - return metricsTelemetry; + return new OTelMetricsTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkMeterProvider()); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java index f8fe885ee450c..0fb05a08c27bb 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java @@ -10,8 +10,8 @@ import org.opensearch.core.common.Strings; +import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.BiConsumer; @@ -51,7 +51,7 @@ private static OTelPropagatedSpan getPropagatedSpan(Context context) { } @Override - public Optional<Span> extractFromHeaders(Map<String, List<String>> headers) { + public Optional<Span> extractFromHeaders(Map<String, Collection<String>> headers) { Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), headers, HEADER_TEXT_MAP_GETTER); return Optional.ofNullable(getPropagatedSpan(context)); } @@ -87,9 +87,9 @@ public String get(Map<String, String> headers, String key) { } }; - private static final TextMapGetter<Map<String, List<String>>> HEADER_TEXT_MAP_GETTER = new TextMapGetter<>() { + private static final TextMapGetter<Map<String, Collection<String>>> HEADER_TEXT_MAP_GETTER = new TextMapGetter<>() { @Override - public Iterable<String> keys(Map<String, List<String>> headers) { + public Iterable<String> keys(Map<String, Collection<String>> headers) { if (headers != null) { return headers.keySet(); } else { @@ -98,7 +98,7 @@ public Iterable<String> keys(Map<String, List<String>> headers) { } @Override - public String get(Map<String, List<String>> headers, String key) { + public String get(Map<String, Collection<String>> headers, String key) { if (headers != null && headers.containsKey(key)) { return Strings.collectionToCommaDelimitedString(headers.get(key)); }
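The propagator change just above widens header values from List<String> to Collection<String>; multi-valued headers are joined with commas by the TextMapGetter before W3C trace-context extraction. A hypothetical call against the new signature (sketch names are mine; the traceparent value is the W3C documentation example, not something from this diff):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;
    import org.opensearch.telemetry.tracing.Span;
    import org.opensearch.telemetry.tracing.TracingContextPropagator;

    class ExtractSketch {
        static Optional<Span> demo(TracingContextPropagator propagator) {
            Map<String, Collection<String>> headers = new HashMap<>();
            // Placeholder trace context (W3C spec example value):
            headers.put("traceparent", Arrays.asList("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"));
            return propagator.extractFromHeaders(headers);
        }
    }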
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java index f88afe623fd56..af39617a8c744 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -8,31 +8,33 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelAttributesConverter; import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; import java.io.IOException; -import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.trace.TracerProvider; import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * OTel based Telemetry provider */ public class OTelTracingTelemetry<T extends TracerProvider & Closeable> implements TracingTelemetry { - private final OpenTelemetry openTelemetry; + private final RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; private final T tracerProvider; private final io.opentelemetry.api.trace.Tracer otelTracer; /** * Creates OTel based {@link TracingTelemetry} - * @param openTelemetry OpenTelemetry instance + * @param refCountedOpenTelemetry OpenTelemetry instance * @param tracerProvider {@link TracerProvider} instance. */ - public OTelTracingTelemetry(OpenTelemetry openTelemetry, T tracerProvider) { - this.openTelemetry = openTelemetry; + public OTelTracingTelemetry(RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry, T tracerProvider) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; + this.refCountedOpenTelemetry.incRef(); this.tracerProvider = tracerProvider; this.otelTracer = tracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @@ -40,6 +42,7 @@ public OTelTracingTelemetry(OpenTelemetry openTelemetry, T tracerProvider) { @Override public void close() throws IOException { tracerProvider.close(); + refCountedOpenTelemetry.close(); } @Override @@ -49,7 +52,7 @@ public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) @Override public TracingContextPropagator getContextPropagator() { - return new OTelTracingContextPropagator(openTelemetry); + return new OTelTracingContextPropagator(refCountedOpenTelemetry.get()); } private Span createOtelSpan(SpanCreationContext spanCreationContext, Span parentSpan) { diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java index 233c93e6b9a36..9de575b69774a 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -8,11 +8,13 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelAttributesConverter; import org.opensearch.telemetry.OTelTelemetryPlugin; import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.test.OpenSearchTestCase; +import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.metrics.DoubleCounter; import io.opentelemetry.api.metrics.DoubleCounterBuilder; import io.opentelemetry.api.metrics.DoubleUpDownCounter; @@ -34,12 +36,16 @@ public void testCounter() { String description = "test"; String unit = "1"; Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); MeterProvider meterProvider = mock(MeterProvider.class);
when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); - MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); @@ -59,6 +65,7 @@ public void testCounterNegativeValue() { String counterName = "test-counter"; String description = "test"; String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Meter mockMeter = mock(Meter.class); DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); @@ -66,7 +73,10 @@ public void testCounterNegativeValue() { MeterProvider meterProvider = mock(MeterProvider.class); when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); - MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); @@ -83,6 +93,7 @@ public void testUpDownCounter() { String counterName = "test-counter"; String description = "test"; String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Meter mockMeter = mock(Meter.class); DoubleUpDownCounter mockOTelUpDownDoubleCounter = mock(DoubleUpDownCounter.class); LongUpDownCounterBuilder mockOTelLongUpDownCounterBuilder = mock(LongUpDownCounterBuilder.class); @@ -90,7 +101,10 @@ public void testUpDownCounter() { MeterProvider meterProvider = mock(MeterProvider.class); when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); - MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); when(mockMeter.upDownCounterBuilder(counterName)).thenReturn(mockOTelLongUpDownCounterBuilder); when(mockOTelLongUpDownCounterBuilder.setDescription(description)).thenReturn(mockOTelLongUpDownCounterBuilder); when(mockOTelLongUpDownCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongUpDownCounterBuilder); diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java index 16a3ec9493d5d..d865a329104c1 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java @@ -11,8 +11,8 @@ import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; -import java.util.List; 
import java.util.Map; import io.opentelemetry.api.OpenTelemetry; @@ -57,7 +57,7 @@ public void testExtractTracerContextFromHeader() { } public void testExtractTracerContextFromHttpHeader() { - Map<String, List<String>> requestHeaders = new HashMap<>(); + Map<String, Collection<String>> requestHeaders = new HashMap<>(); requestHeaders.put("traceparent", Arrays.asList("00-" + TRACE_ID + "-" + SPAN_ID + "-00")); OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java index 1a508ed252493..1f0c2f674e655 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.OTelTelemetryPlugin; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; @@ -37,7 +38,10 @@ public void testCreateSpanWithoutParent() { when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); Attributes attributes = Attributes.create().addAttribute("name", "value"); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null); verify(mockSpanBuilder, never()).setParent(any()); verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes)); @@ -59,7 +63,10 @@ public void testCreateSpanWithParent() { Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create().addAttribute("name", 1l); Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); @@ -85,7 +92,10 @@ public void testCreateSpanWithParentWithMultipleAttributes() { Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create() .addAttribute("key1", 1l) .addAttribute("key2", 2.0) @@ -125,7 +135,10 @@ public void testGetContextPropagator() { TracerProvider mockTracerProvider = mock(TracerProvider.class); when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); - TracingTelemetry
tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); } diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 deleted file mode 100644 index d53adfa649f5f..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 
0000000000000..cfc0befee237a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 deleted file mode 100644 index 258f7c957dda0..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 deleted file mode 100644 index b8bc0a4370f58..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 deleted file mode 100644 index 247975e0a64c7..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle new file mode 100644 index 0000000000000..7d7eb330b4a55 --- /dev/null +++ b/plugins/transport-reactor-netty4/build.gradle @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +import org.opensearch.gradle.info.BuildParams +import org.opensearch.gradle.test.RestIntegTestTask +import org.opensearch.gradle.test.TestTask +import org.opensearch.gradle.test.rest.JavaRestTestPlugin +import org.opensearch.gradle.test.InternalClusterTestPlugin + +apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.java-rest-test' +apply plugin: 'opensearch.internal-cluster-test' + +// The transport-reactor-netty4 plugin is published to maven +apply plugin: 'opensearch.publish' + +opensearchplugin { + description 'Reactor Netty 4 based transport implementation' + classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin' + hasClientJar = true +} + +dependencies { + // network stack + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-codec:${versions.netty}" + api "io.netty:netty-codec-dns:${versions.netty}" + api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "io.netty:netty-handler:${versions.netty}" + api "io.netty:netty-resolver-dns:${versions.netty}" + api "io.netty:netty-resolver:${versions.netty}" + api "io.netty:netty-transport:${versions.netty}" + api "io.netty:netty-transport-native-unix-common:${versions.netty}" + + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" + testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + testImplementation project(":modules:transport-netty4") +} + +restResources { + restApi { + includeCore '_common', 'cluster', 'nodes' + } +} + +tasks.named("dependencyLicenses").configure { + mapping from: /netty-.*/, to: 'netty' + mapping from: /reactor-.*/, to: 'reactor' +} + +// TODO: Remove that once we have a complete test suite +testingConventions.enabled = false + +test { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. 
+ */ + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +internalClusterTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +javaRestTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.CertificateCompressionAlgo', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + 'io.netty.internal.tcnative.SSLPrivateKeyMethod', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 
'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', + 'io.netty.internal.tcnative.CertificateCallback', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLSession', + 'io.netty.internal.tcnative.SSLSessionCache', + 'io.netty.channel.epoll.Epoll', + 'io.netty.channel.epoll.EpollDatagramChannel', + 'io.netty.channel.epoll.EpollServerSocketChannel', + 'io.netty.channel.epoll.EpollSocketChannel', + 'io.netty.channel.kqueue.KQueue', + 'io.netty.channel.kqueue.KQueueDatagramChannel', + 'io.netty.channel.kqueue.KQueueServerSocketChannel', + 'io.netty.channel.kqueue.KQueueSocketChannel', + 'io.netty.handler.codec.haproxy.HAProxyMessage', + 'io.netty.handler.codec.haproxy.HAProxyMessageDecoder', + 'io.netty.handler.proxy.ProxyHandler', + 'io.netty.incubator.channel.uring.IOUring', + 'io.netty.incubator.channel.uring.IOUringDatagramChannel', + 'io.netty.incubator.channel.uring.IOUringServerSocketChannel', + 'io.netty.incubator.channel.uring.IOUringSocketChannel', + + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'io.micrometer.common.KeyValue', + 'io.micrometer.common.KeyValues', + 'io.micrometer.common.docs.KeyName', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Gauge', + 'io.micrometer.core.instrument.Gauge$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.Meter$Type', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.docs.MeterDocumentation', + 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.observation.Observation', + 'io.micrometer.observation.Observation$Context', + 'io.micrometer.observation.ObservationHandler', + 'io.micrometer.observation.ObservationRegistry', + 'io.micrometer.observation.ObservationRegistry$ObservationConfig', + 'io.micrometer.observation.docs.ObservationDocumentation', + 'io.micrometer.observation.transport.ReceiverContext', + 'io.micrometer.observation.transport.RequestReplyReceiverContext', + 'io.micrometer.observation.transport.RequestReplySenderContext', + 'io.micrometer.observation.transport.SenderContext', + 'io.micrometer.tracing.Span', + 'io.micrometer.tracing.Tracer', + 'io.micrometer.tracing.docs.SpanDocumentation', + 'io.micrometer.tracing.handler.DefaultTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', + 
'io.micrometer.tracing.propagation.Propagator' + ) + + ignoreViolations( + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5' + ) +} diff --git a/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt new file mode 100644 index 0000000000000..5bbf91a14de23 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt @@ -0,0 +1,116 @@ + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+
+Also, please refer to each LICENSE.<component>.txt file, which is located in
+the 'license' directory of the distribution file, for the license terms of the
+components that this product depends on.
+
+-------------------------------------------------------------------------------
+This product contains the extensions to Java Collections Framework which has
+been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+  * LICENSE:
+    * license/LICENSE.jsr166y.txt (Public Domain)
+  * HOMEPAGE:
+    * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+    * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+This product contains a modified version of Robert Harder's Public Domain
+Base64 Encoder and Decoder, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.base64.txt (Public Domain)
+  * HOMEPAGE:
+    * http://iharder.sourceforge.net/current/java/base64/
+
+This product contains a modified version of 'JZlib', a re-implementation of
+zlib in pure Java, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jzlib.txt (BSD Style License)
+  * HOMEPAGE:
+    * http://www.jcraft.com/jzlib/
+
+This product contains a modified version of 'Webbit', a Java event based
+WebSocket and HTTP server:
+
+  * LICENSE:
+    * license/LICENSE.webbit.txt (BSD License)
+  * HOMEPAGE:
+    * https://github.com/joewalnes/webbit
+
+This product optionally depends on 'Protocol Buffers', Google's data
+interchange format, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.protobuf.txt (New BSD License)
+  * HOMEPAGE:
+    * http://code.google.com/p/protobuf/
+
+This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+a temporary self-signed X.509 certificate when the JVM does not provide the
+equivalent functionality.
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..0789dea62778a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +24d0f4f2b0b0b55acc498d62f9c0921d0ea6da7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 
100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..450c34d975bb9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d3f503d7554cf25edc6b83b9b4724b240c6e91d5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt rename to plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..352d69396d0c9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 @@ -0,0 +1 @@ +378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..1bcb0e0c52950 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 @@ -0,0 +1 @@ +e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java new file mode 100644 index 0000000000000..abbd50bf1b235 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */
+
+package org.opensearch;
+
+import org.opensearch.common.network.NetworkModule;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.transport.Netty4ModulePlugin;
+import org.opensearch.transport.reactor.ReactorNetty4Plugin;
+import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport;
+
+import java.util.Collection;
+import java.util.List;
+
+public abstract class OpenSearchReactorNetty4IntegTestCase extends OpenSearchIntegTestCase {
+
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    @Override
+    protected boolean addMockTransportService() {
+        return false;
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
+        // randomize netty settings
+        if (randomBoolean()) {
+            builder.put(ReactorNetty4Transport.SETTING_WORKER_COUNT.getKey(), random().nextInt(3) + 1);
+        }
+        builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME);
+        builder.put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME);
+        return builder.build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(ReactorNetty4Plugin.class, Netty4ModulePlugin.class);
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java
new file mode 100644
index 0000000000000..833d60375a2bd
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java
@@ -0,0 +1,159 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.OpenSearchReactorNetty4IntegTestCase;
+import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.util.ReferenceCounted;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * This test checks that in-flight requests are limited at the HTTP level and that requests that are excluded from limiting can pass.
+ *
+ * As the same setting is also used to limit in-flight requests on the transport level, we avoid transport messages by forcing
+ * a single node "cluster".
+ */
+@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1)
+public class ReactorNetty4HttpRequestSizeLimitIT extends OpenSearchReactorNetty4IntegTestCase {
+
+    private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB);
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), LIMIT)
+            .build();
+    }
+
+    public void testLimitsInFlightRequests() throws Exception {
+        ensureGreen();
+
+        // we use the limit size as a (very) rough indication of how many requests we should send to hit the limit
+        int numRequests = LIMIT.bytesAsInt() / 100;
+
+        StringBuilder bulkRequest = new StringBuilder();
+        for (int i = 0; i < numRequests; i++) {
+            bulkRequest.append("{\"index\": {}}");
+            bulkRequest.append(System.lineSeparator());
+            bulkRequest.append("{ \"field\" : \"value\" }");
+            bulkRequest.append(System.lineSeparator());
+        }
+
+        List<Tuple<String, CharSequence>> requests = new ArrayList<>();
+        for (int i = 0; i < 150; i++) {
+            requests.add(Tuple.tuple("/index/_bulk", bulkRequest));
+        }
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
+
+        try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) {
+            final Collection<FullHttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1));
+            try {
+                assertThat(singleResponse, hasSize(1));
+                assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK);
+
+                final Collection<FullHttpResponse> multipleResponses = nettyHttpClient.post(transportAddress.address(), requests);
+                try {
+                    assertThat(multipleResponses, hasSize(requests.size()));
+                    assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.TOO_MANY_REQUESTS);
+                } finally {
+                    multipleResponses.forEach(ReferenceCounted::release);
+                }
+            } finally {
+                singleResponse.forEach(ReferenceCounted::release);
+            }
+        }
+    }
+
+    public void testDoesNotLimitExcludedRequests() throws Exception {
+        ensureGreen();
+
+        List<Tuple<String, String>> requestUris = new ArrayList<>();
+        for (int i = 0; i < 1500; i++) {
+            requestUris.add(Tuple.tuple("/_cluster/settings", "{ \"transient\": {\"search.default_search_timeout\": \"40s\" } }"));
+        }
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
+
+        try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) {
+            final Collection<FullHttpResponse> responses = nettyHttpClient.put(transportAddress.address(), requestUris);
+            try {
+                assertThat(responses, hasSize(requestUris.size()));
+                assertAllInExpectedStatus(responses, HttpResponseStatus.OK);
+            } finally {
+                responses.forEach(ReferenceCounted::release);
+            }
+        }
+    }
+
+    private void assertAtLeastOnceExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) {
+        long countExpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus)).count();
+        assertThat("Expected at least one request with status [" + expectedStatus + "]", countExpectedStatus, greaterThan(0L));
+    }
+
+    private void assertAllInExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) {
+        long countUnexpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus) == false).count();
+        assertThat(
+            "Expected all requests with status [" + expectedStatus + "] but [" + countUnexpectedStatus + "] requests had a different one",
+            countUnexpectedStatus,
+            equalTo(0L)
+        );
+    }
+
+}
diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java
new file mode 100644
index 0000000000000..c0e43de06f6ff
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.OpenSearchReactorNetty4IntegTestCase;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+
+import java.util.Collection;
+import java.util.Locale;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.util.ReferenceCounted;
+
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
+public class ReactorNetty4PipeliningIT extends OpenSearchReactorNetty4IntegTestCase {
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    public void testThatNettyHttpServerSupportsPipelining() throws Exception {
+        String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" };
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses();
+        TransportAddress transportAddress = randomFrom(boundAddresses);
+
+        try (ReactorHttpClient client = ReactorHttpClient.create()) {
+            Collection<FullHttpResponse> responses = client.get(transportAddress.address(), true, requests);
+            try {
+                assertThat(responses, hasSize(5));
+
+                Collection<String> opaqueIds = ReactorHttpClient.returnOpaqueIds(responses);
+                assertOpaqueIdsInOrder(opaqueIds);
+            } finally {
+                responses.forEach(ReferenceCounted::release);
+            }
+        }
+    }
+
+    private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) {
+        // check if opaque ids are monotonically increasing
+        int i = 0;
+        String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds);
+        for (String opaqueId : opaqueIds) {
+            assertThat(msg, opaqueId, is(String.valueOf(i++)));
+        }
+    }
+
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java
new file mode 100644
index 0000000000000..bd75227dabd08
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.rest.RestRequest; + +import io.netty.handler.codec.http.HttpMethod; + +final class HttpConversionUtil { + private HttpConversionUtil() {} + + /** + * Converts {@link HttpMethod} to {@link RestRequest.Method} + * @param method {@link HttpMethod} method + * @return corresponding {@link RestRequest.Method} + * @throws IllegalArgumentException if HTTP method is not supported + */ + public static RestRequest.Method convertMethod(HttpMethod method) { + if (method == HttpMethod.GET) { + return RestRequest.Method.GET; + } else if (method == HttpMethod.POST) { + return RestRequest.Method.POST; + } else if (method == HttpMethod.PUT) { + return RestRequest.Method.PUT; + } else if (method == HttpMethod.DELETE) { + return RestRequest.Method.DELETE; + } else if (method == HttpMethod.HEAD) { + return RestRequest.Method.HEAD; + } else if (method == HttpMethod.OPTIONS) { + return RestRequest.Method.OPTIONS; + } else if (method == HttpMethod.PATCH) { + return RestRequest.Method.PATCH; + } else if (method == HttpMethod.TRACE) { + return RestRequest.Method.TRACE; + } else if (method == HttpMethod.CONNECT) { + return RestRequest.Method.CONNECT; + } else { + throw new IllegalArgumentException("Unexpected http method: " + method); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java new file mode 100644 index 0000000000000..98b359319ff1b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.common.concurrent.CompletableContext;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.http.HttpChannel;
+import org.opensearch.http.HttpResponse;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpContent;
+import reactor.core.publisher.FluxSink;
+import reactor.netty.http.server.HttpServerRequest;
+import reactor.netty.http.server.HttpServerResponse;
+
+class NonStreamingHttpChannel implements HttpChannel {
+    private final HttpServerRequest request;
+    private final HttpServerResponse response;
+    private final CompletableContext<Void> closeContext = new CompletableContext<>();
+    private final FluxSink<HttpContent> emitter;
+
+    NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink<HttpContent> emitter) {
+        this.request = request;
+        this.response = response;
+        this.emitter = emitter;
+        this.request.withConnection(connection -> Netty4Utils.addListener(connection.channel().closeFuture(), closeContext));
+    }
+
+    @Override
+    public boolean isOpen() {
+        final AtomicBoolean isOpen = new AtomicBoolean();
+        request.withConnection(connection -> isOpen.set(connection.channel().isOpen()));
+        return isOpen.get();
+    }
+
+    @Override
+    public void close() {
+        request.withConnection(connection -> connection.channel().close());
+    }
+
+    @Override
+    public void addCloseListener(ActionListener<Void> listener) {
+        closeContext.addListener(ActionListener.toBiConsumer(listener));
+    }
+
+    @Override
+    public void sendResponse(HttpResponse response, ActionListener<Void> listener) {
+        emitter.next(createResponse(response));
+        listener.onResponse(null);
+        emitter.complete();
+    }
+
+    @Override
+    public InetSocketAddress getRemoteAddress() {
+        return (InetSocketAddress) response.remoteAddress();
+    }
+
+    @Override
+    public InetSocketAddress getLocalAddress() {
+        return (InetSocketAddress) response.hostAddress();
+    }
+
+    FullHttpResponse createResponse(HttpResponse response) {
+        return (FullHttpResponse) response;
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java
new file mode 100644
index 0000000000000..d43e23e800e65
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java
@@ -0,0 +1,104 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.http.AbstractHttpServerTransport;
+import org.opensearch.http.HttpRequest;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
+
+import io.netty.buffer.CompositeByteBuf;
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.codec.http.LastHttpContent;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import reactor.core.Disposable;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
+import reactor.netty.http.server.HttpServerRequest;
+import reactor.netty.http.server.HttpServerResponse;
+
+class NonStreamingRequestConsumer<T extends HttpContent> implements Consumer<T>, Publisher<HttpContent>, Disposable {
+    private final HttpServerRequest request;
+    private final HttpServerResponse response;
+    private final CompositeByteBuf content;
+    private final Publisher<HttpContent> publisher;
+    private final AbstractHttpServerTransport transport;
+    private final AtomicBoolean disposed = new AtomicBoolean(false);
+    private volatile FluxSink<HttpContent> emitter;
+
+    NonStreamingRequestConsumer(
+        AbstractHttpServerTransport transport,
+        HttpServerRequest request,
+        HttpServerResponse response,
+        int maxCompositeBufferComponents
+    ) {
+        this.transport = transport;
+        this.request = request;
+        this.response = response;
+        this.content = response.alloc().compositeBuffer(maxCompositeBufferComponents);
+        this.publisher = Flux.create(emitter -> register(emitter));
+    }
+
+    private void register(FluxSink<HttpContent> emitter) {
+        this.emitter = emitter.onDispose(this).onCancel(this);
+    }
+
+    @Override
+    public void accept(T message) {
+        try {
+            if (message instanceof LastHttpContent) {
+                process(message, emitter);
+            } else if (message instanceof HttpContent) {
+                process(message, emitter);
+            }
+        } catch (Throwable ex) {
+            emitter.error(ex);
+        }
+    }
+
+    public void process(HttpContent in, FluxSink<HttpContent> emitter) {
+        // Consume request body in full before dispatching it
+        content.addComponent(true, in.content().retain());
+
+        if (in instanceof LastHttpContent) {
+            final NonStreamingHttpChannel channel = new NonStreamingHttpChannel(request, response, emitter);
+            final HttpRequest r = createRequest(request, content);
+
+            try {
+                transport.incomingRequest(r, channel);
+            } catch (Exception ex) {
+                emitter.error(ex);
+                transport.onException(channel, ex);
+            } finally {
+                r.release();
+                if (disposed.compareAndSet(false, true)) {
+                    this.content.release();
+                }
+            }
+        }
+    }
+
+    HttpRequest createRequest(HttpServerRequest request, CompositeByteBuf content) {
+        return new ReactorNetty4HttpRequest(request, content.retain());
+    }
+
+    @Override
+    public void subscribe(Subscriber<? super HttpContent> s) {
+        publisher.subscribe(s);
+    }
+
+    @Override
+    public void dispose() {
+        if (disposed.compareAndSet(false, true)) {
+            this.content.release();
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java
new file mode 100644
index 0000000000000..4406c555a5b04
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java
@@ -0,0 +1,272 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.http.HttpRequest;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.util.AbstractMap;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.handler.codec.http.DefaultHttpHeaders;
+import io.netty.handler.codec.http.HttpHeaderNames;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.cookie.Cookie;
+import io.netty.handler.codec.http.cookie.ServerCookieDecoder;
+import io.netty.handler.codec.http.cookie.ServerCookieEncoder;
+import reactor.netty.http.server.HttpServerRequest;
+
+class ReactorNetty4HttpRequest implements HttpRequest {
+    private final String protocol;
+    private final HttpMethod method;
+    private final String uri;
+    private final ByteBuf content;
+    private final HttpHeadersMap headers;
+    private final AtomicBoolean released;
+    private final Exception inboundException;
+    private final boolean pooled;
+
+    ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content) {
+        this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), true, content);
+    }
+
+    ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content, Exception inboundException) {
+        this(
+            request.protocol(),
+            request.method(),
+            request.uri(),
+            new HttpHeadersMap(request.requestHeaders()),
+            new AtomicBoolean(false),
+            true,
+            content,
+            inboundException
+        );
+    }
+
+    private ReactorNetty4HttpRequest(
+        HttpServerRequest request,
+        HttpHeadersMap headers,
+        AtomicBoolean released,
+        boolean pooled,
+        ByteBuf content
+    ) {
+        this(request.protocol(), request.method(), request.uri(), headers, released, pooled, content, null);
+    }
+
+    private ReactorNetty4HttpRequest(
+        String protocol,
+        HttpMethod method,
+        String uri,
+        HttpHeadersMap headers,
+        AtomicBoolean released,
+        boolean pooled,
+        ByteBuf content,
+        Exception inboundException
+    ) {
+        this.protocol = protocol;
+        this.method = method;
+        this.uri = uri;
+        this.headers = headers;
+        this.content = content;
+        this.pooled = pooled;
+        this.released = released;
+        this.inboundException = inboundException;
+    }
+
+    @Override
+    public RestRequest.Method method() {
+        return HttpConversionUtil.convertMethod(method);
+    }
+
+    @Override
+    public String uri() {
+        return uri;
+    }
+
+    @Override
+    public BytesReference content() {
+        assert released.get() == false;
+        return Netty4Utils.toBytesReference(content);
+    }
+
+    @Override
+    public void release() {
+        if (pooled && released.compareAndSet(false, true)) {
+            content.release();
+        }
+    }
+
+    @Override
+    public HttpRequest releaseAndCopy() {
+        assert released.get() == false;
+        if (pooled == false) {
+            return this;
+        }
+        try {
+            final ByteBuf copiedContent = Unpooled.copiedBuffer(content);
+            return new ReactorNetty4HttpRequest(protocol, method, uri, headers, new AtomicBoolean(false), false, copiedContent, null);
+        } finally {
+            release();
+        }
+    }
+
+    @Override
+    public final Map<String, List<String>> getHeaders() {
+        return headers;
+    }
+
+    @Override
+    public List<String> strictCookies() {
+        String cookieString = headers.httpHeaders.get(HttpHeaderNames.COOKIE);
+        if (cookieString != null) {
+            Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString);
+            if (!cookies.isEmpty()) {
+                return ServerCookieEncoder.STRICT.encode(cookies);
+            }
+        }
+        return Collections.emptyList();
+    }
+
+    @Override
+    public HttpVersion protocolVersion() {
+        if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_0.toString())) {
+            return HttpRequest.HttpVersion.HTTP_1_0;
+        } else if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_1.toString())) {
+            return HttpRequest.HttpVersion.HTTP_1_1;
+        } else {
+            throw new IllegalArgumentException("Unexpected http protocol version: " + protocol);
+        }
+    }
+
+    @Override
+    public HttpRequest removeHeader(String header) {
+        HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders();
+        headersWithoutContentTypeHeader.add(headers.httpHeaders);
+        headersWithoutContentTypeHeader.remove(header);
+
+        return new ReactorNetty4HttpRequest(
+            protocol,
+            method,
+            uri,
+            new HttpHeadersMap(headersWithoutContentTypeHeader),
+            released,
+            pooled,
+            content,
+            null
+        );
+    }
+
+    @Override
+    public ReactorNetty4HttpResponse createResponse(RestStatus status, BytesReference content) {
+        return new ReactorNetty4HttpResponse(
+            headers.httpHeaders,
+            io.netty.handler.codec.http.HttpVersion.valueOf(protocol),
+            status,
+            content
+        );
+    }
+
+    @Override
+    public Exception getInboundException() {
+        return inboundException;
+    }
+
+    /**
+     * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications
+     * and, due to the underlying implementation, it performs case-insensitive lookups of keys to values.
+     *
+     * It is important to note that this implementation does have some downsides in that each invocation of the
+     * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a
+     * view of the underlying values.
+     */
+    private static class HttpHeadersMap implements Map<String, List<String>> {
+
+        private final HttpHeaders httpHeaders;
+
+        private HttpHeadersMap(HttpHeaders httpHeaders) {
+            this.httpHeaders = httpHeaders;
+        }
+
+        @Override
+        public int size() {
+            return httpHeaders.size();
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return httpHeaders.isEmpty();
+        }
+
+        @Override
+        public boolean containsKey(Object key) {
+            return key instanceof String && httpHeaders.contains((String) key);
+        }
+
+        @Override
+        public boolean containsValue(Object value) {
+            return value instanceof List && httpHeaders.names().stream().map(httpHeaders::getAll).anyMatch(value::equals);
+        }
+
+        @Override
+        public List<String> get(Object key) {
+            return key instanceof String ? httpHeaders.getAll((String) key) : null;
+        }
+
+        @Override
+        public List<String> put(String key, List<String> value) {
+            throw new UnsupportedOperationException("modifications are not supported");
+        }
+
+        @Override
+        public List<String> remove(Object key) {
+            throw new UnsupportedOperationException("modifications are not supported");
+        }
+
+        @Override
+        public void putAll(Map<? extends String, ? extends List<String>> m) {
+            throw new UnsupportedOperationException("modifications are not supported");
+        }
+
+        @Override
+        public void clear() {
+            throw new UnsupportedOperationException("modifications are not supported");
+        }
+
+        @Override
+        public Set<String> keySet() {
+            return httpHeaders.names();
+        }
+
+        @Override
+        public Collection<List<String>> values() {
+            return httpHeaders.names().stream().map(k -> Collections.unmodifiableList(httpHeaders.getAll(k))).collect(Collectors.toList());
+        }
+
+        @Override
+        public Set<Entry<String, List<String>>> entrySet() {
+            return httpHeaders.names()
+                .stream()
+                .map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k)))
+                .collect(Collectors.toSet());
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java
new file mode 100644
index 0000000000000..c45ad54b668a3
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.http.HttpResponse;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import io.netty.handler.codec.http.DefaultFullHttpResponse;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.handler.codec.http.HttpVersion;
+
+class ReactorNetty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse {
+    private final HttpHeaders requestHeaders;
+
+    ReactorNetty4HttpResponse(HttpHeaders requestHeaders, HttpVersion version, RestStatus status, BytesReference content) {
+        super(version, HttpResponseStatus.valueOf(status.getStatus()), Netty4Utils.toByteBuf(content));
+        this.requestHeaders = requestHeaders;
+    }
+
+    @Override
+    public void addHeader(String name, String value) {
+        headers().add(name, value);
+    }
+
+    @Override
+    public boolean containsHeader(String name) {
+        return headers().contains(name);
+    }
+
+    public HttpHeaders requestHeaders() {
+        return requestHeaders;
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java
new file mode 100644
index 0000000000000..84360bf028ba9
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.common.concurrent.CompletableContext;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.http.HttpServerChannel;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.net.InetSocketAddress;
+
+import io.netty.channel.Channel;
+
+class ReactorNetty4HttpServerChannel implements HttpServerChannel {
+    private final Channel channel;
+    private final CompletableContext<Void> closeContext = new CompletableContext<>();
+
+    ReactorNetty4HttpServerChannel(Channel channel) {
+        this.channel = channel;
+        Netty4Utils.addListener(this.channel.closeFuture(), closeContext);
+    }
+
+    @Override
+    public InetSocketAddress getLocalAddress() {
+        return (InetSocketAddress) channel.localAddress();
+    }
+
+    @Override
+    public void addCloseListener(ActionListener<Void> listener) {
+        closeContext.addListener(ActionListener.toBiConsumer(listener));
+    }
+
+    @Override
+    public boolean isOpen() {
+        return channel.isOpen();
+    }
+
+    @Override
+    public void close() {
+        channel.close();
+    }
+
+    @Override
+    public String toString() {
+        return "ReactorNetty4HttpServerChannel{localAddress=" + getLocalAddress() + "}";
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
new file mode 100644
index 0000000000000..d4a5a9ad83af6
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
@@ -0,0 +1,313 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.common.util.net.NetUtils;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.http.AbstractHttpServerTransport;
+import org.opensearch.http.HttpChannel;
+import org.opensearch.http.HttpReadTimeoutException;
+import org.opensearch.http.HttpServerChannel;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.reactor.SharedGroupFactory;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.net.InetSocketAddress;
+import java.net.SocketOption;
+import java.time.Duration;
+
+import io.netty.channel.ChannelOption;
+import io.netty.channel.socket.nio.NioChannelOption;
+import io.netty.handler.codec.http.DefaultLastHttpContent;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.timeout.ReadTimeoutException;
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Scheduler;
+import reactor.core.scheduler.Schedulers;
+import reactor.netty.DisposableServer;
+import reactor.netty.http.HttpProtocol;
+import reactor.netty.http.server.HttpServer;
+import reactor.netty.http.server.HttpServerRequest;
+import reactor.netty.http.server.HttpServerResponse;
+
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_COUNT;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_IDLE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_INTERVAL;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE;
+
+/**
+ * The HTTP transport implementation based on Reactor Netty (please see {@link HttpServer}).
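+ * <p>
+ * As configured in {@code bind} below, the server accepts both HTTP/1.1 and cleartext HTTP/2 (h2c)
+ * connections and accumulates each request body in full (see {@link NonStreamingRequestConsumer})
+ * before dispatching it.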
+ */
+public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTransport {
+    private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components";
+    private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("opensearch.net.mtu", "1500")));
+
+    /**
+     * The number of Reactor Netty HTTP workers
+     */
+    public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = Setting.intSetting("http.netty.worker_count", 0, Property.NodeScope);
+
+    /**
+     * The maximum number of composite components for request accumulation
+     */
+    public static final Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>(
+        SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
+        (s) -> {
+            ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s);
+            /*
+             * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of
+             * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate
+             * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded()
+             * for implementation details). We want to avoid that consolidation because it creates additional garbage on the heap and also
+             * increases the application's native memory footprint (as direct byte buffers hold their contents off-heap).
+             *
+             * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see
+             * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for
+             * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The
+             * number of buffers that are needed depends on how often Netty reads network packets, which depends on the network type (MTU).
+             * We assume here that OpenSearch receives HTTP requests via an Ethernet connection, which has an MTU of 1500 bytes.
+             *
+             * Note that we are *not* pre-allocating any memory based on this setting but rather determining the CompositeByteBuf's
+             * capacity. The tradeoff is between less (but larger) buffers that are contained in the CompositeByteBuf and more (but
+             * smaller) buffers. With the default max content length of 100MB and an MTU of 1500 bytes we would allow 69905 entries.
+             */
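+            // Worked example (illustration only): with the 100MB default for http.max_content_length and the
+            // assumed Ethernet MTU of 1500 bytes, the estimate below is 104857600 / 1500 = 69905 components,
+            // which falls inside the clamp range [2, Integer.MAX_VALUE] and so becomes the default.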
+            long maxBufferComponentsEstimate = Math.round((double) (maxContentLength.getBytes() / MTU.getBytes()));
+            // clamp the value to the allowed range; Netty's CompositeByteBuf implementation does not allow less than two components
+            long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE));
+            return String.valueOf(maxBufferComponents);
+        },
+        s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS),
+        Property.NodeScope
+    );
+
+    private final SharedGroupFactory sharedGroupFactory;
+    private final int readTimeoutMillis;
+    private final int connectTimeoutMillis;
+    private final int maxCompositeBufferComponents;
+    private final ByteSizeValue maxInitialLineLength;
+    private final ByteSizeValue maxHeaderSize;
+    private final ByteSizeValue maxChunkSize;
+    private volatile SharedGroupFactory.SharedGroup sharedGroup;
+    private volatile DisposableServer disposableServer;
+    private volatile Scheduler scheduler;
+
+    /**
+     * Creates a new HTTP transport implementation based on Reactor Netty (please see {@link HttpServer}).
+     * @param settings settings
+     * @param networkService network service
+     * @param bigArrays big array allocator
+     * @param threadPool thread pool instance
+     * @param xContentRegistry XContent registry instance
+     * @param dispatcher dispatcher instance
+     * @param clusterSettings cluster settings
+     * @param sharedGroupFactory shared group factory
+     * @param tracer tracer instance
+     */
+    public ReactorNetty4HttpServerTransport(
+        Settings settings,
+        NetworkService networkService,
+        BigArrays bigArrays,
+        ThreadPool threadPool,
+        NamedXContentRegistry xContentRegistry,
+        Dispatcher dispatcher,
+        ClusterSettings clusterSettings,
+        SharedGroupFactory sharedGroupFactory,
+        Tracer tracer
+    ) {
+        super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer);
+        Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings));
+        this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis());
+        this.connectTimeoutMillis = Math.toIntExact(SETTING_HTTP_CONNECT_TIMEOUT.get(settings).getMillis());
+        this.sharedGroupFactory = sharedGroupFactory;
+        this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
+        this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
+        this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
+        this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
+    }
+
+    /**
+     * Binds the transport engine to the socket address
+     * @param socketAddress socket address to bind to
+     */
+    @Override
+    protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Exception {
+        final HttpServer server = configureChannelOptions(
+            HttpServer.create()
+                .httpFormDecoder(builder -> builder.scheduler(scheduler))
+                .idleTimeout(Duration.ofMillis(connectTimeoutMillis))
+                .readTimeout(Duration.ofMillis(readTimeoutMillis))
+                .runOn(sharedGroup.getLowLevelGroup())
+                .bindAddress(() -> socketAddress)
+                .compress(true)
+                .httpRequestDecoder(
+                    spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt())
+                        .maxHeaderSize(maxHeaderSize.bytesAsInt())
+                        .maxInitialLineLength(maxInitialLineLength.bytesAsInt())
+                )
+                .protocol(HttpProtocol.HTTP11, HttpProtocol.H2C)
+                .handle((req, res) -> incomingRequest(req, res))
+        );
+
+        disposableServer = server.bindNow();
+        return new ReactorNetty4HttpServerChannel(disposableServer.channel());
+    }
+
+    private HttpServer configureChannelOptions(final HttpServer server) {
+        HttpServer configured = server.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings))
+            .childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
+
+        if (SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)) {
+            // Netty logs a warning if it can't set the
option, so try this only on supported platforms + if (IOUtils.LINUX || IOUtils.MAC_OS_X) { + if (SETTING_HTTP_TCP_KEEP_IDLE.get(settings) >= 0) { + final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); + if (keepIdleOption != null) { + configured = configured.childOption(NioChannelOption.of(keepIdleOption), SETTING_HTTP_TCP_KEEP_IDLE.get(settings)); + } + } + if (SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) >= 0) { + final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); + if (keepIntervalOption != null) { + configured = configured.childOption( + NioChannelOption.of(keepIntervalOption), + SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) + ); + } + } + if (SETTING_HTTP_TCP_KEEP_COUNT.get(settings) >= 0) { + final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); + if (keepCountOption != null) { + configured = configured.childOption( + NioChannelOption.of(keepCountOption), + SETTING_HTTP_TCP_KEEP_COUNT.get(settings) + ); + } + } + } + } + + final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + configured = configured.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + configured = configured.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + configured = configured.option(ChannelOption.SO_REUSEADDR, reuseAddress); + configured = configured.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); + + return configured; + } + + /** + * Handles incoming Reactor Netty request + * @param request request instance + * @param response response instances + * @return response publisher + */ + protected Publisher incomingRequest(HttpServerRequest request, HttpServerResponse response) { + final NonStreamingRequestConsumer consumer = new NonStreamingRequestConsumer<>( + this, + request, + response, + maxCompositeBufferComponents + ); + + request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer); + + return Mono.from(consumer).flatMap(hc -> { + final FullHttpResponse r = (FullHttpResponse) hc; + response.status(r.status()); + response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue()))); + response.chunkedTransfer(false); + response.compression(true); + r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); + return Mono.from(response.sendObject(r.content())); + }); + } + + /** + * Called to tear down internal resources + */ + @Override + protected void stopInternal() { + if (sharedGroup != null) { + sharedGroup.shutdown(); + sharedGroup = null; + } + + if (scheduler != null) { + scheduler.dispose(); + scheduler = null; + } + + if (disposableServer != null) { + disposableServer.disposeNow(); + disposableServer = null; + } + } + + /** + * Starts the transport + */ + @Override + protected void doStart() { + boolean success = false; + try { + scheduler = Schedulers.newBoundedElastic( + Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE, + Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, + "http-form-decoder" + ); + sharedGroup = sharedGroupFactory.getHttpGroup(); + bindServer(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise 
we leak threads since we never moved to started
+            }
+        }
+    }
+
+    @Override
+    public void onException(HttpChannel channel, Exception cause) {
+        if (cause instanceof ReadTimeoutException) {
+            super.onException(channel, new HttpReadTimeoutException(readTimeoutMillis, cause));
+        } else {
+            super.onException(channel, cause);
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java
new file mode 100644
index 0000000000000..b5ecb0b62f79d
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The new HTTP transport implementations based on Reactor Netty.
+ */
+package org.opensearch.http.reactor.netty4;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
new file mode 100644
index 0000000000000..dc310c3793109
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.reactor;
+
+import org.opensearch.common.SetOnce;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.PageCacheRecycler;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * The experimental network plugin that introduces new transport implementations based on Reactor Netty.
+ */
+public class ReactorNetty4Plugin extends Plugin implements NetworkPlugin {
+    /**
+     * The name of the new experimental HTTP transport implementation based on Reactor Netty.
+     */
+    public static final String REACTOR_NETTY_HTTP_TRANSPORT_NAME = "reactor-netty4";
+
+    private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>();
+
+    /**
+     * Default constructor
+     */
+    public ReactorNetty4Plugin() {}
+
+    /**
+     * Returns a list of additional {@link Setting} definitions for this plugin.
+     */
+    @Override
+    public List<Setting<?>> getSettings() {
+        return Arrays.asList(/* no settings registered since we're picking the ones from the Netty 4 transport */);
+    }
+
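+    // Illustrative note (a sketch, not exercised anywhere in this patch): a node would select this experimental
+    // transport through the NetworkModule "http.type" setting, keyed by the name registered below, e.g.
+    //
+    //   Settings settings = Settings.builder()
+    //       .put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME)
+    //       .build();
+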
+    /**
+     * Returns a map of {@link HttpServerTransport} suppliers.
+     * See {@link org.opensearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation.
+     * @param settings settings
+     * @param networkService network service
+     * @param bigArrays big array allocator
+     * @param pageCacheRecycler page cache recycler instance
+     * @param circuitBreakerService circuit breaker service instance
+     * @param threadPool thread pool instance
+     * @param xContentRegistry XContent registry instance
+     * @param dispatcher dispatcher instance
+     * @param clusterSettings cluster settings
+     * @param tracer tracer instance
+     */
+    @Override
+    public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        BigArrays bigArrays,
+        PageCacheRecycler pageCacheRecycler,
+        CircuitBreakerService circuitBreakerService,
+        NamedXContentRegistry xContentRegistry,
+        NetworkService networkService,
+        HttpServerTransport.Dispatcher dispatcher,
+        ClusterSettings clusterSettings,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            REACTOR_NETTY_HTTP_TRANSPORT_NAME,
+            () -> new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry,
+                dispatcher,
+                clusterSettings,
+                getSharedGroupFactory(settings),
+                tracer
+            )
+        );
+    }
+
+    private SharedGroupFactory getSharedGroupFactory(Settings settings) {
+        final SharedGroupFactory groupFactory = this.groupFactory.get();
+        if (groupFactory != null) {
+            assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided";
+            return groupFactory;
+        } else {
+            this.groupFactory.set(new SharedGroupFactory(settings));
+            return this.groupFactory.get();
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java
new file mode 100644
index 0000000000000..ab7de33c8e673
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java
@@ -0,0 +1,164 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.transport.reactor;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.concurrent.AbstractRefCounted;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
+import org.opensearch.transport.TcpTransport;
+import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.concurrent.Future;
+
+import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory;
+
+/**
+ * Creates and returns {@link io.netty.channel.EventLoopGroup} instances. It will return a shared group for
+ * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if
+ * {@link org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport#SETTING_HTTP_WORKER_COUNT} is configured to be 0.
+ * If that setting is not 0, then it will return a different group in the {@link #getHttpGroup()} call. + */ +public final class SharedGroupFactory { + + private static final Logger logger = LogManager.getLogger(SharedGroupFactory.class); + + private final Settings settings; + private final int workerCount; + private final int httpWorkerCount; + + private RefCountedGroup genericGroup; + private SharedGroup dedicatedHttpGroup; + + /** + * Creates new shared group factory instance from settings + * @param settings settings + */ + public SharedGroupFactory(Settings settings) { + this.settings = settings; + this.workerCount = ReactorNetty4Transport.SETTING_WORKER_COUNT.get(settings); + this.httpWorkerCount = ReactorNetty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.get(settings); + } + + Settings getSettings() { + return settings; + } + + /** + * Gets the number of configured transport workers + * @return the number of configured transport workers + */ + public int getTransportWorkerCount() { + return workerCount; + } + + /** + * Gets transport shared group + * @return transport shared group + */ + public synchronized SharedGroup getTransportGroup() { + return getGenericGroup(); + } + + /** + * Gets HTTP transport shared group + * @return HTTP transport shared group + */ + public synchronized SharedGroup getHttpGroup() { + if (httpWorkerCount == 0) { + return getGenericGroup(); + } else { + if (dedicatedHttpGroup == null) { + NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup( + httpWorkerCount, + daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) + ); + dedicatedHttpGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup)); + } + return dedicatedHttpGroup; + } + } + + private SharedGroup getGenericGroup() { + if (genericGroup == null) { + EventLoopGroup eventLoopGroup = new NioEventLoopGroup( + workerCount, + daemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) + ); + this.genericGroup = new RefCountedGroup(eventLoopGroup); + } else { + genericGroup.incRef(); + } + return new SharedGroup(genericGroup); + } + + private static class RefCountedGroup extends AbstractRefCounted { + + public static final String NAME = "ref-counted-event-loop-group"; + private final EventLoopGroup eventLoopGroup; + + private RefCountedGroup(EventLoopGroup eventLoopGroup) { + super(NAME); + this.eventLoopGroup = eventLoopGroup; + } + + @Override + protected void closeInternal() { + Future shutdownFuture = eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS); + shutdownFuture.awaitUninterruptibly(); + if (shutdownFuture.isSuccess() == false) { + logger.warn("Error closing netty event loop group", shutdownFuture.cause()); + } + } + } + + /** + * Wraps the {@link RefCountedGroup}. Calls {@link RefCountedGroup#decRef()} on close. After close, + * this wrapped instance can no longer be used. 
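+     * <p>
+     * A hypothetical usage sketch (illustration only):
+     * <pre>{@code
+     * SharedGroupFactory.SharedGroup group = sharedGroupFactory.getHttpGroup();
+     * try {
+     *     EventLoopGroup workers = group.getLowLevelGroup(); // hand off to the server bootstrap
+     * } finally {
+     *     group.shutdown(); // decrements the underlying reference count exactly once
+     * }
+     * }</pre>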
+ */ + public static class SharedGroup { + + private final RefCountedGroup refCountedGroup; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + private SharedGroup(RefCountedGroup refCountedGroup) { + this.refCountedGroup = refCountedGroup; + } + + /** + * Gets Netty's {@link EventLoopGroup} instance + * @return Netty's {@link EventLoopGroup} instance + */ + public EventLoopGroup getLowLevelGroup() { + return refCountedGroup.eventLoopGroup; + } + + /** + * Decreases the reference to underlying {@link EventLoopGroup} instance + */ + public void shutdown() { + if (isOpen.compareAndSet(true, false)) { + refCountedGroup.decRef(); + } + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java new file mode 100644 index 0000000000000..8ec432b7dd5cd --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.transport.reactor.netty4; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.Booleans; +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.util.NettyRuntime; + +/** + * Shameless copy of Netty4Utils from transport-netty4 module + */ +public final class Netty4Utils { + private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); + + /** + * Utility class + */ + private Netty4Utils() {} + + /** + * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools). + * + * @param availableProcessors the number of available processors + * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value + */ + public static void setAvailableProcessors(final int availableProcessors) { + // we set this to false in tests to avoid tests that randomly set processors from stepping on each other + final boolean set = Booleans.parseBoolean(System.getProperty("opensearch.set.netty.runtime.available.processors", "true")); + if (!set) { + return; + } + + /* + * This can be invoked twice, once from Netty4Transport and another time from Netty4HttpServerTransport; however, + * Netty4Runtime#availableProcessors forbids settings the number of processors twice so we prevent double invocation here. 
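+         * For instance (hypothetical numbers): if the first caller pins the value to 4, a later call passing 8
+         * falls through to the IllegalStateException branch below instead of silently reconfiguring Netty.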
+ */ + if (isAvailableProcessorsSet.compareAndSet(false, true)) { + NettyRuntime.setAvailableProcessors(availableProcessors); + } else if (availableProcessors != NettyRuntime.availableProcessors()) { + /* + * We have previously set the available processors yet either we are trying to set it to a different value now or there is a bug + * in Netty and our previous value did not take, bail. + */ + final String message = String.format( + Locale.ROOT, + "available processors value [%d] did not match current value [%d]", + availableProcessors, + NettyRuntime.availableProcessors() + ); + throw new IllegalStateException(message); + } + } + + /** + * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal + * pages of the BytesReference. Don't free the bytes of reference before the ByteBuf goes out of scope. + * @param reference reference to convert + */ + public static ByteBuf toByteBuf(final BytesReference reference) { + if (reference.length() == 0) { + return Unpooled.EMPTY_BUFFER; + } + final BytesRefIterator iterator = reference.iterator(); + // usually we have one, two, or three components from the header, the message, and a buffer + final List buffers = new ArrayList<>(3); + try { + BytesRef slice; + while ((slice = iterator.next()) != null) { + buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length)); + } + + if (buffers.size() == 1) { + return buffers.get(0); + } else { + CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size()); + composite.addComponents(true, buffers); + return composite; + } + } catch (IOException ex) { + throw new AssertionError("no IO happens here", ex); + } + } + + /** + * Wraps the given ChannelBuffer with a BytesReference + * @param buffer buffer to convert + */ + public static BytesReference toBytesReference(final ByteBuf buffer) { + final int readableBytes = buffer.readableBytes(); + if (readableBytes == 0) { + return BytesArray.EMPTY; + } else if (buffer.hasArray()) { + return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), readableBytes); + } else { + final ByteBuffer[] byteBuffers = buffer.nioBuffers(); + return BytesReference.fromByteBuffers(byteBuffers); + } + } + + /** + * Add completion listener to ChannelFuture + * @param channelFuture ChannelFuture to add listener to + * @param context completion listener context + */ + public static void addListener(ChannelFuture channelFuture, CompletableContext context) { + channelFuture.addListener(f -> { + if (f.isSuccess()) { + context.complete(null); + } else { + Throwable cause = f.cause(); + if (cause instanceof Error) { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + context.completeExceptionally(new Exception(cause)); + } else { + context.completeExceptionally((Exception) cause); + } + } + }); + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java new file mode 100644 index 0000000000000..b3e92f58c540a --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.transport.reactor.netty4;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+
+import reactor.netty.tcp.TcpServer;
+
+/**
+ * The transport implementation based on Reactor Netty (please see {@link TcpServer}).
+ */
+public class ReactorNetty4Transport {
+    /**
+     * The number of Netty workers
+     */
+    public static final Setting<Integer> SETTING_WORKER_COUNT = new Setting<>(
+        "transport.netty.worker_count",
+        (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)),
+        (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"),
+        Property.NodeScope
+    );
+
+    /**
+     * Default constructor
+     */
+    public ReactorNetty4Transport() {}
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java
new file mode 100644
index 0000000000000..921bca104c6fe
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The new transport implementations based on Reactor Netty.
+ */
+package org.opensearch.transport.reactor.netty4;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java
new file mode 100644
index 0000000000000..2f36ebb7f11f8
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The experimental network plugin that introduces new transport implementations based on Reactor Netty.
+ */
+package org.opensearch.transport.reactor;
diff --git a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..4f2dcde995338
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; + + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; + + // Netty sets custom classloader for some of its internal threads + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! + permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java new file mode 100644 index 0000000000000..443ecd0f40ead --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -0,0 +1,208 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.tasks.Task; + +import java.io.Closeable; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.resolver.DefaultAddressResolverGroup; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.publisher.ParallelFlux; +import reactor.netty.http.client.HttpClient; + +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/** + * Tiny helper to send http requests over netty. 
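+ * <p>
+ * Rough intended usage in the tests below (a sketch; callers must release the responses):
+ * <pre>{@code
+ * try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ *     List<FullHttpResponse> responses = client.get(remoteAddress, "/");
+ *     try {
+ *         // assert on status codes and bodies
+ *     } finally {
+ *         responses.forEach(ReferenceCounted::release);
+ *     }
+ * }
+ * }</pre>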
+ */ +class ReactorHttpClient implements Closeable { + private final boolean compression; + + static Collection returnHttpResponseBodies(Collection responses) { + List list = new ArrayList<>(responses.size()); + for (FullHttpResponse response : responses) { + list.add(response.content().toString(StandardCharsets.UTF_8)); + } + return list; + } + + static Collection returnOpaqueIds(Collection responses) { + List list = new ArrayList<>(responses.size()); + for (HttpResponse response : responses) { + list.add(response.headers().get(Task.X_OPAQUE_ID)); + } + return list; + } + + ReactorHttpClient(boolean compression) { + this.compression = compression; + } + + static ReactorHttpClient create() { + return create(true); + } + + static ReactorHttpClient create(boolean compression) { + return new ReactorHttpClient(compression); + } + + public List get(InetSocketAddress remoteAddress, String... uris) throws InterruptedException { + return get(remoteAddress, false, uris); + } + + public List get(InetSocketAddress remoteAddress, boolean ordered, String... uris) throws InterruptedException { + final List requests = new ArrayList<>(uris.length); + + for (int i = 0; i < uris.length; i++) { + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); + httpRequest.headers().add(HOST, "localhost"); + httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + requests.add(httpRequest); + } + + return sendRequests(remoteAddress, requests, ordered); + } + + public final Collection post(InetSocketAddress remoteAddress, List> urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies); + } + + public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { + final List responses = sendRequests(remoteAddress, Collections.singleton(httpRequest), false); + assert responses.size() == 1 : "expected 1 and only 1 http response"; + return responses.get(0); + } + + public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest, HttpContent content) + throws InterruptedException { + final List responses = sendRequests( + remoteAddress, + Collections.singleton( + new DefaultFullHttpRequest( + httpRequest.protocolVersion(), + httpRequest.method(), + httpRequest.uri(), + content.content(), + httpRequest.headers(), + httpRequest.trailingHeaders() + ) + ), + false + ); + assert responses.size() == 1 : "expected 1 and only 1 http response"; + return responses.get(0); + } + + public final Collection put(InetSocketAddress remoteAddress, List> urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.PUT, remoteAddress, urisAndBodies); + } + + private List processRequestsWithBody( + HttpMethod method, + InetSocketAddress remoteAddress, + List> urisAndBodies + ) throws InterruptedException { + List requests = new ArrayList<>(urisAndBodies.size()); + for (int i = 0; i < urisAndBodies.size(); ++i) { + final Tuple uriAndBody = urisAndBodies.get(i); + ByteBuf content = Unpooled.copiedBuffer(uriAndBody.v2(), StandardCharsets.UTF_8); + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, method, uriAndBody.v1(), content); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); + 
request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json");
+            request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+            request.headers().add("X-Opaque-ID", String.valueOf(i));
+            requests.add(request);
+        }
+        return sendRequests(remoteAddress, requests, false);
+    }
+
+    private List<FullHttpResponse> sendRequests(
+        final InetSocketAddress remoteAddress,
+        final Collection<FullHttpRequest> requests,
+        boolean ordered
+    ) {
+        final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1);
+        try {
+            final HttpClient client = HttpClient.newConnection()
+                .resolver(DefaultAddressResolverGroup.INSTANCE)
+                .runOn(eventLoopGroup)
+                .host(remoteAddress.getHostString())
+                .port(remoteAddress.getPort())
+                .compress(compression);
+
+            @SuppressWarnings("unchecked")
+            final Mono<FullHttpResponse>[] monos = requests.stream()
+                .map(
+                    request -> client.headers(h -> h.add(request.headers()))
+                        .baseUrl(request.getUri())
+                        .request(request.method())
+                        .send(Mono.fromSupplier(() -> request.content()))
+                        .responseSingle(
+                            (r, body) -> body.switchIfEmpty(Mono.just(Unpooled.EMPTY_BUFFER))
+                                .map(
+                                    b -> new DefaultFullHttpResponse(
+                                        r.version(),
+                                        r.status(),
+                                        b.retain(),
+                                        r.responseHeaders(),
+                                        EmptyHttpHeaders.INSTANCE
+                                    )
+                                )
+                        )
+                )
+                .toArray(Mono[]::new);
+
+            if (ordered == false) {
+                return ParallelFlux.from(monos).sequential().collectList().block();
+            } else {
+                return Flux.concat(monos).flatMapSequential(r -> Mono.just(r)).collectList().block();
+            }
+        } finally {
+            eventLoopGroup.shutdownGracefully().awaitUninterruptibly();
+        }
+    }
+
+    @Override
+    public void close() {
+        // nothing to release here: every sendRequests call creates and shuts down its own event loop group
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
new file mode 100644
index 0000000000000..00ca378a4e46b
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
@@ -0,0 +1,122 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ReactorNetty4BadRequestTests extends OpenSearchTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new OpenSearchException(cause); + channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e)); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + }; + + Settings settings = Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()).build(); + try ( + HttpServerTransport httpServerTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + httpServerTransport.start(); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create()) { + final List responses = nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%"); + + try { + assertThat(responses, hasSize(1)); + final FullHttpResponse response = responses.get(0); + assertThat(response.status().code(), equalTo(400)); + final Collection responseBodies = ReactorHttpClient.returnHttpResponseBodies(responses); + assertThat(responseBodies, hasSize(1)); + final String body = responseBodies.iterator().next(); + assertThat(body, containsString("\"type\":\"bad_parameter_exception\"")); + assertThat( + body, + containsString("\"reason\":\"java.lang.IllegalArgumentException: partial escape sequence at end of string: %/\"") + ); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + } + +} diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java new file mode 100644 index 0000000000000..15a5b04c802a4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java @@ -0,0 +1,579 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocatorMetric; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; + +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link ReactorNetty4HttpServerTransport} class. 
+ */ +public class ReactorNetty4HttpServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = createSettings(); + final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to an unsupported expectation with a 417 status. 
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + // Reactor Netty 4 does not expose 100 CONTINUE response but instead just asks for content + final HttpContent continuationRequest = new DefaultHttpContent(Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), request, continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } finally { + continuationResponse.release(); + } + } + } + } + + public void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + initialSettings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + ReactorNetty4HttpServerTransport otherTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void 
testBadRequest() throws InterruptedException {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+                throw new AssertionError();
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error("--> Unexpected bad request");
+                throw new AssertionError(cause);
+            }
+        };
+
+        final Settings settings;
+        final int maxInitialLineLength;
+        final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+        if (randomBoolean()) {
+            maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
+            settings = createSettings();
+        } else {
+            maxInitialLineLength = randomIntBetween(1, 8192);
+            settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build();
+        }
+
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                clusterSettings,
+                new SharedGroupFactory(settings),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                final String url = "/" + randomAlphaOfLength(maxInitialLineLength);
+                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(response.status(), equalTo(HttpResponseStatus.REQUEST_URI_TOO_LONG));
+                    assertThat(response.content().array().length, equalTo(0));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
+    public void testDispatchFailed() throws InterruptedException {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                throw new RuntimeException("Bad things happen");
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error("--> Unexpected bad request");
+                throw new AssertionError(cause);
+            }
+        };
+
+        final Settings settings = createSettings();
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                clusterSettings,
+                new SharedGroupFactory(settings),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
+
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(response.status(), equalTo(HttpResponseStatus.INTERNAL_SERVER_ERROR));
+                    assertThat(response.content().array().length, equalTo(0));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
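+    // Editorial note on the next test (an interpretation, not asserted by the patch itself): compressing a ~4MB
+    // response should happen in reasonably sized chunks, so the getHugeAllocationCount() comparison below is meant
+    // to catch the transport accidentally materializing the whole compressed payload in one huge pooled allocation.
+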
public void testLargeCompressedResponse() throws InterruptedException { + final String responseString = randomAlphaOfLength(4 * 1024 * 1024); + final String url = "/thing/"; + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + if (url.equals(request.uri())) { + channel.sendResponse(new BytesRestResponse(OK, responseString)); + } else { + logger.error("--> Unexpected successful uri [{}]", request.uri()); + throw new AssertionError(); + } + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (ReactorHttpClient client = ReactorHttpClient.create()) { + DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); + long numOfHugeAllocations = getHugeAllocationCount(); + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(getHugeAllocationCount(), equalTo(numOfHugeAllocations)); + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + byte[] bytes = new byte[response.content().readableBytes()]; + response.content().readBytes(bytes); + assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString)); + } finally { + response.release(); + } + } + } + } + + private long getHugeAllocationCount() { + long numOfHugAllocations = 0; + ByteBufAllocator allocator = NettyAllocator.getAllocator(); + assert allocator instanceof NettyAllocator.NoDirectBuffers; + ByteBufAllocator delegate = ((NettyAllocator.NoDirectBuffers) allocator).getDelegate(); + if (delegate instanceof PooledByteBufAllocator) { + PooledByteBufAllocatorMetric metric = ((PooledByteBufAllocator) delegate).metric(); + numOfHugAllocations = metric.heapArenas().stream().mapToLong(PoolArenaMetric::numHugeAllocations).sum(); + } + return numOfHugAllocations; + } + + public void testCorsRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true) + 
.put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") + .build(); + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "google.com"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + + public void testConnectTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + NioEventLoopGroup group = new NioEventLoopGroup(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + final CountDownLatch channelClosedLatch = new CountDownLatch(1); + + final Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .channel(NioSocketChannel.class) + .handler(new 
ChannelInitializer<SocketChannel>() {
+
+ @Override
+ protected void initChannel(SocketChannel ch) {
+ ch.pipeline().addLast(new ChannelHandlerAdapter() {
+ });
+
+ }
+ })
+ .group(group);
+ ChannelFuture connect = clientBootstrap.connect(remoteAddress.address());
+ connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown());
+
+ assertTrue("Channel should be closed due to connect timeout", channelClosedLatch.await(1, TimeUnit.MINUTES));
+
+ } finally {
+ group.shutdownGracefully().await();
+ }
+ }
+
+ private Settings createSettings() {
+ return createBuilderWithPort().build();
+ }
+
+ private Settings.Builder createBuilderWithPort() {
+ return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange());
+ }
+}
diff --git a/qa/os/build.gradle b/qa/os/build.gradle
index 66c6525439dac..082ed5277575a 100644
--- a/qa/os/build.gradle
+++ b/qa/os/build.gradle
@@ -70,6 +70,11 @@ tasks.dependenciesInfo.enabled = false tasks.thirdPartyAudit.ignoreMissingClasses()
+tasks.thirdPartyAudit.ignoreViolations(
+ 'org.apache.logging.log4j.core.util.internal.UnsafeUtil',
+ 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1'
+)
+
tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' }
diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java
index dbea8db1a12fa..c38fcc468c673 100644
--- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java
+++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java
@@ -42,11 +42,13 @@ import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.xcontent.XContentFactory; import org.junit.After; import org.junit.Before; import java.io.IOException;
+import java.util.concurrent.TimeUnit;
public class RemoteClustersIT extends AbstractMultiClusterRemoteTestCase {
@@ -112,7 +114,7 @@ public void testSniffModeConnectionFails() throws IOException { assertFalse(rci.isConnected()); }
- public void testHAProxyModeConnectionWorks() throws IOException {
+ public void testHAProxyModeConnectionWorks() throws Exception {
String proxyAddress = "haproxy:9600"; logger.info("Configuring remote cluster [{}]", proxyAddress); ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
@@ -121,12 +123,14 @@ public void testHAProxyModeConnectionWorks() throws IOException { .build()); assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());
- RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
- logger.info("Connection info: {}", rci);
- if (!rci.isConnected()) {
- logger.info("Cluster health: {}", cluster1Client().cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT));
- }
- assertTrue(rci.isConnected());
+ assertBusy(() -> {
+ RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
+ logger.info("Connection info: {}", rci);
+ if (!rci.isConnected()) {
+ logger.info("Cluster health: {}", cluster1Client().cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT));
+ }
+
assertTrue(rci.isConnected()); + }, 10, TimeUnit.SECONDS); assertEquals(2L, cluster1Client().search( new SearchRequest("haproxynosn:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value); diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml new file mode 100644 index 0000000000000..e669016cad98a --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml @@ -0,0 +1,16 @@ +--- +"Insert Document with geoshape field": + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_old", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_old", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml new file mode 100644 index 0000000000000..30a39447905c0 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml @@ -0,0 +1,28 @@ +--- +"Create index with Geoshape field": + - do: + indices.create: + index: geo_shape_index_old + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "location": + "type": "geo_shape" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_old", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_old", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml new file mode 100644 index 0000000000000..4c7b12a7f1909 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml @@ -0,0 +1,61 @@ +--- +"Validate we are able to index documents after upgrade": + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_old", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_old", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } + + +--- +"Create index with Geoshape field in new cluster": + - do: + indices.create: + index: geo_shape_index_new + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "location": + "type": "geo_shape" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_new", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": 
"envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_new", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_new + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_new + body: + aggregations: + myaggregation: + geo_bounds: + field: "location" + - match: { hits.total: 2 } + - match: { aggregations.myaggregation.bounds.top_left.lat: 0.9999999823048711 } + - match: { aggregations.myaggregation.bounds.top_left.lon: 99.99999999068677 } + - match: { aggregations.myaggregation.bounds.bottom_right.lat: 0.0 } + - match: { aggregations.myaggregation.bounds.bottom_right.lon: 105.99999996833503 } diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md new file mode 100644 index 0000000000000..040cc053469ed --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -0,0 +1,68 @@ +## 2023-10-12 Version 2.11.0 Release Notes + +## [2.11] + +### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) +- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Add parallel file download support for remote store based replication ([#8596](https://github.com/opensearch-project/OpenSearch/pull/8596)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. 
([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110))
+- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189))
+- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562))
+- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204))
+- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992))
+- [Remote Store] Add support to restrict creation & deletion of system repository and mutation of its immutable settings ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839))
+- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261))
+
+### Dependencies
+- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963))
+- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575))
+- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968))
+- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950))
+- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972))
+- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971))
+- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
+- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
+- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098))
+- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125))
+- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752))
+- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147))
+- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209))
+- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126))
+- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299))
+- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302))
+- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303))
+- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301))
+- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210))
+- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564))
+
+### Changed
+- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))
+- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916))
+- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
+- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
+- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246))
+- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200))
+- Add instrumentation in Inbound Handler. ([#10143](https://github.com/opensearch-project/OpenSearch/pull/10143))
+- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356))
+- [Remote Store] Add support to reload repository metadata in place ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569))
+- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241))
+- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379))
+
+### Removed
+- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447))
+
+### Fixed
+- Fix ignore_missing parameter having no effect when using a template snippet in the rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725))
+- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045))
+- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082))
+- Fix remove ingest processor not handling the ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089))
+- Fix registration and initialization of multiple extensions ([#10256](https://github.com/opensearch-project/OpenSearch/pull/10256))
+- Fix circular dependency in Settings initialization ([#10194](https://github.com/opensearch-project/OpenSearch/pull/10194))
+- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
diff --git a/release-notes/opensearch.release-notes-2.11.1.md b/release-notes/opensearch.release-notes-2.11.1.md
new file mode 100644
index 0000000000000..06613558de177
--- /dev/null
+++ b/release-notes/opensearch.release-notes-2.11.1.md
@@ -0,0 +1,10 @@
+## 2023-11-20 Version 2.11.1 Release Notes
+
+## [2.11.1]
+
+### Changed
+- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060))
+
+### Fixed
+- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999))
+- Adding version condition while adding geoshape doc values to the index, to ensure backward compatibility. ([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml
index b9089689b0cf1..3b7ea15164e9f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml
@@ -156,3 +156,23 @@ query: {"range": { "rank": { "from": 0 } } } track_total_hits: false size: 3
+
+---
+"Index Sort half float":
+ - do:
+ catch: bad_request
+ indices.create:
+ index: test
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+ index.sort.field: rank
+ mappings:
+ properties:
+ rank:
+ type: half_float
+
+ # This should fail with 400 as half_float is not supported for index sort
+ - match: { status: 400 }
+ - match: { error.type: illegal_argument_exception }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml
index 1ddba45c97c72..fb5c3268a3c82 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml
@@ -164,3 +164,99 @@ setup: - match: { aggregations.date.buckets.1.key: 1540857600000 } - match: { aggregations.date.buckets.1.key_as_string: "2018-10-30T00:00:00.000Z" } - match: { aggregations.date.buckets.1.doc_count: 2 }
+
+
+---
+"date with nested sort now":
+ - skip:
+ version: " - 2.11.99"
+ reason: fixed in 2.12.0
+
+ # These tests cover the scenario where a nested sort uses now() on a date field type.
+ # The date field here is a nested field, and we trigger an asc/desc sort
+ # on that nested field. A `filter` clause is needed when sorting on any nested field;
+ # in this case, "gte": "now/h" sorts the nested date_field only where the
+ # document has a value greater than the current time now().
+ # A nested field sort query doesn't sort documents that are not qualified through the
+ # `filter` clause.
+ # Only adding tests for `gte` as `lte` would be same behaviour + + - do: + indices.create: + index: test + body: + mappings: + properties: + nested_field: + type: nested + properties: + date_field: + type: date + format: date_optional_time + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"nested_field": [{"date_field": "3023-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3024-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3025-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3026-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3027-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2022-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2023-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2021-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2020-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2019-10-26T12:00:00+09:00"}]} + + # gte: now/h with the desc sort + - do: + search: + index: test + body: + size: 5 + sort: [{ nested_field.date_field: { mode: max, order: desc, nested: { path: nested_field, filter: { bool: { filter : [{ range : { nested_field.date_field: { gte: now/h, time_zone: +09:00} } }] } } } } } ] + - match: {hits.total.value: 10 } + - length: {hits.hits: 5 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.nested_field.0.date_field: "3027-10-26T12:00:00+09:00" } + - match: { hits.hits.0.sort: [33381428400000] } + - match: { hits.hits.1._source.nested_field.0.date_field: "3026-10-26T12:00:00+09:00" } + - match: { hits.hits.1.sort: [ 33349892400000 ] } + - match: { hits.hits.2._source.nested_field.0.date_field: "3025-10-26T12:00:00+09:00" } + - match: { hits.hits.2.sort: [ 33318356400000 ] } + - match: { hits.hits.3._source.nested_field.0.date_field: "3024-10-26T12:00:00+09:00" } + - match: { hits.hits.3.sort: [ 33286820400000 ] } + - match: { hits.hits.4._source.nested_field.0.date_field: "3023-10-26T12:00:00+09:00" } + - match: { hits.hits.4.sort: [ 33255198000000 ] } + + # gte: now/h with the asc sort + - do: + search: + index: test + body: + size: 5 + sort: [ { nested_field.date_field: { mode: max, order: asc, nested: { path: nested_field, filter: { bool: { filter: [ { range: { nested_field.date_field: { gte: now/h, time_zone: +09:00 } } } ] } } } } } ] + - match: { hits.total.value: 10 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.nested_field.0.date_field: "3023-10-26T12:00:00+09:00" } + - match: { hits.hits.0.sort: [ 33255198000000 ] } + - match: { hits.hits.1._source.nested_field.0.date_field: "3024-10-26T12:00:00+09:00" } + - match: { hits.hits.1.sort: [ 33286820400000 ] } + - match: { hits.hits.2._source.nested_field.0.date_field: "3025-10-26T12:00:00+09:00" } + - match: { hits.hits.2.sort: [ 33318356400000 ] } + - match: { hits.hits.3._source.nested_field.0.date_field: "3026-10-26T12:00:00+09:00" } + - match: { hits.hits.3.sort: [ 33349892400000 ] } + - match: { hits.hits.4._source.nested_field.0.date_field: "3027-10-26T12:00:00+09:00" } + - match: { hits.hits.4.sort: [ 33381428400000 ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml index ba2b18eb3b6d0..7385eef051243 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml @@ -20,6 +20,7 @@ properties: counter: type: double + - do: bulk: refresh: true @@ -119,3 +120,87 @@ - match: { status: 400 } - match: { error.type: search_phase_execution_exception } - match: { error.caused_by.reason: "Can't do sort across indices, as a field has [unsigned_long] type in one index, and different type in another index!" } + +--- +"search across indices with mixed long and double and float numeric types": + - skip: + version: " - 2.11.0" + reason: half float was broken before 2.11.1 + + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + counter: + type: long + + - do: + indices.create: + index: test_2 + body: + mappings: + properties: + counter: + type: double + + - do: + indices.create: + index: test_3 + body: + mappings: + properties: + counter: + type: half_float + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + - counter: 223372036854775800 + - index: + _index: test_2 + - counter: 1223372036854775800.23 + - index: + _index: test_2 + - counter: 184.4 + - index: + _index: test_3 + - counter: 187.4 + - index: + _index: test_3 + - counter: 194.4 + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: desc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 1223372036854775800.23 } + - match: { hits.hits.0.sort.0: 1223372036854775800.23 } + - match: { hits.hits.1._index: test_1 } + - match: { hits.hits.1._source.counter: 223372036854775800 } + - match: { hits.hits.1.sort.0: 223372036854775800 } + - match: { hits.hits.2._index: test_3 } + - match: { hits.hits.2._source.counter: 194.4 } + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: asc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 184.4 } + - match: { hits.hits.0.sort.0: 184.4 } + - match: { hits.hits.1._index: test_3 } + - match: { hits.hits.1._source.counter: 187.4 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml new file mode 100644 index 0000000000000..8829e7b100fdd --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml @@ -0,0 +1,46 @@ +--- +"search on keyword fields with doc_values enabled": + - do: + indices.create: + index: test + body: + mappings: + properties: + "some_keyword": + type: "keyword" + index: true + doc_values: true + + - do: + bulk: + index: test + refresh: true + body: + - '{"index": {"_index": "test", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data" }' + - '{ "index": { "_index": "test", "_id": "2" }}' + - '{ "some_keyword": "400" }' + - '{ "index": { "_index": "test", "_id": "3" } }' + - '{ "some_keyword": "5" }' + + - do: + search: + index: test + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + index: test + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total.value: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 55e1566656faf..ebecb63dedbaf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -219,7 +219,7 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 08:30:04.828733" } - match: {hits.hits.0.sort: [1571646604828733000] } - # search_after with the sort + # search_after with the asc sort - do: search: index: test @@ -320,3 +320,468 @@ - length: { hits.hits: 1 } - match: { hits.hits.0._index: test } - match: { hits.hits.0._source.population: null } + +--- +"half float": + - skip: + version: " - 2.11.0" + reason: half_float was broken for 2.11.0 and earlier + + - do: + indices.create: + index: test + body: + mappings: + properties: + population: + type: half_float + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": 184.4} + {"index":{}} + {"population": 194.4} + {"index":{}} + {"population": 144.4} + {"index":{}} + {"population": 174.4} + {"index":{}} + {"population": 164.4} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: desc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 184.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: asc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 164.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + # search_after with the asc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: asc } ] + search_after: [ 184.375 ] # this is rounded sort value in sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + + # search_after with the desc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: desc } ] + search_after: [ 164.375 ] # this is rounded sort value in sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + + # search_after with the asc sort with missing + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "asc", "missing": "_last" } } ] + search_after: [ 200 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } + + # search_after with the desc sort with missing + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "desc", "missing": "_last" } } ] + search_after: [ 
100 ] # making it out of min/max so only missing value hit is qualified
+
+ - match: { hits.total: 6 }
+ - length: { hits.hits: 1 }
+ - match: { hits.hits.0._index: test }
+ - match: { hits.hits.0._source.population: null }
+
+---
+"numeric skipping logic with competitive missing value":
+ - skip:
+ version: " - 2.12.99"
+ reason: newly added test, supported from 3.0.0
+
+# This test checks that the skipping logic is not applied when missing values are competitive,
+# for all numeric types: int, long, float, half_float, double, unsigned_long.
+# We insert 24 documents, some with missing values, and pass the missing value as the
+# search_after parameter. The secondary sort field is id, which has no missing values.
+# If Lucene applied the skipping logic here, it would skip all documents whose primary sort
+# field is missing, even though they should still be sorted by the secondary field id.
+# This test addresses bugs like https://github.com/opensearch-project/OpenSearch/issues/9537
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ halffloat:
+ type: half_float
+ long:
+ type: long
+ int:
+ type: integer
+ float:
+ type: float
+ double:
+ type: double
+ unsignedlong:
+ type: unsigned_long
+ - do:
+ bulk:
+ refresh: true
+ index: test
+ body: |
+ {"index":{}}
+ {"id": 1, "halffloat": 1, "long": 1, "int": 1, "float": 1, "double": 1, "unsignedlong": 1}
+ {"index":{}}
+ {"id": 2, "halffloat": 2, "long": 2, "int": 2, "float": 2, "double": 2, "unsignedlong": 2}
+ {"index":{}}
+ {"id": 3, "halffloat": 3, "long": 3, "int": 3, "float": 3, "double": 3, "unsignedlong": 3}
+ {"index":{}}
+ {"id": 4, "halffloat": 4, "long": 4, "int": 4, "float": 4, "double": 4, "unsignedlong": 4}
+ {"index":{}}
+ {"id": 5, "halffloat": 5, "long": 5, "int": 5, "float": 5, "double": 5, "unsignedlong": 5}
+ {"index":{}}
+ {"id": 6, "halffloat": 6, "long": 6, "int": 6, "float": 6, "double": 6, "unsignedlong": 6}
+ {"index":{}}
+ {"id": 7, "halffloat": 7, "long": 7, "int": 7, "float": 7, "double": 7, "unsignedlong": 7}
+ {"index":{}}
+ {"id": 8, "halffloat": 8, "long": 8, "int": 8, "float": 8, "double": 8, "unsignedlong": 8}
+ {"index":{}}
+ {"id": 9, "halffloat": 9, "long": 9, "int": 9, "float": 9, "double": 9, "unsignedlong": 9}
+ {"index":{}}
+ {"id": 10, "halffloat": 10, "long": 10, "int": 10, "float": 10, "double": 10, "unsignedlong": 10}
+ {"index":{}}
+ {"id": 11, "halffloat": 11, "long": 11, "int": 11, "float": 11, "double": 11, "unsignedlong": 11}
+ {"index":{}}
+ {"id": 12, "halffloat": 12, "long": 12, "int": 12, "float": 12, "double": 12, "unsignedlong": 12}
+ {"index":{}}
+ {"id": 13, "halffloat": 13, "long": 13, "int": 13, "float": 13, "double": 13, "unsignedlong": 13}
+ {"index":{}}
+ {"id": 14, "halffloat": 14, "long": 14, "int": 14, "float": 14, "double": 14, "unsignedlong": 14}
+ {"index":{}}
+ {"id": 15, "halffloat": 15, "long": 15, "int": 15, "float": 15, "double": 15, "unsignedlong": 15}
+ {"index":{}}
+ {"id": 16, "halffloat": 16, "long": 16, "int": 16, "float": 16, "double": 16, "unsignedlong": 16}
+ {"index":{}}
+ {"id": 17, "halffloat": 17, "long": 17, "int": 17, "float": 17, "double": 17, "unsignedlong": 17}
+ {"index":{}}
+ {"id": 18, "halffloat": 18, "long": 18, "int": 18, "float": 18, "double": 18, "unsignedlong": 18}
+ {"index":{}}
+ {"id": 19, "halffloat": 19, "long": 19, "int": 19, "float": 19, "double": 19, "unsignedlong": 19}
+ {"index":{}}
+ {"id": 20, "halffloat": 20, "long": 20, "int": 20, "float": 20, "double": 20, "unsignedlong": 
20} + {"index":{}} + {"id": 21, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 22, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 23, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 24, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "halffloat": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.halffloat: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.halffloat: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.halffloat: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "halffloat": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.halffloat: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.halffloat: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.halffloat: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "long": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.long: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.long: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.long: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "long": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.long: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.long: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.long: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "int": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of 
min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.int: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.int: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.int: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "int": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.int: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.int: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.int: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "float": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.float: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.float: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.float: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "float": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.float: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.float: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.float: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "double": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.double: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.double: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.double: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "double": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + 
- match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.double: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.double: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.double: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "unsignedlong": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.unsignedlong: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.unsignedlong: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.unsignedlong: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "unsignedlong": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.unsignedlong: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.unsignedlong: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.unsignedlong: null } + - match: { hits.hits.2._source.id: 22 } diff --git a/server/build.gradle b/server/build.gradle index f6db3d53a0dcc..17b8ff0469ad8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -154,6 +154,10 @@ dependencies { // jcraft api "com.jcraft:jzlib:${versions.jzlib}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + // protobuf api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" @@ -364,11 +368,15 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'com.google.protobuf.UnsafeUtil$MemoryAccessor' + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1', + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' ) } tasks.named("dependencyLicenses").configure { + mapping from: /reactor-.*/, to: 'reactor' mapping from: /lucene-.*/, to: 'lucene' dependencies = project.configurations.runtimeClasspath.fileCollection { it.group.startsWith('org.opensearch') == false || @@ -395,6 +403,7 @@ tasks.named("licenseHeaders").configure { } tasks.test { + environment "node.roles.test", "[]" if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_1_8) { jvmArgs += ["--add-opens", "java.base/java.nio.file=ALL-UNNAMED"] } diff --git a/server/licenses/log4j-api-2.20.0.jar.sha1 
b/server/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/server/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/server/licenses/log4j-api-2.21.0.jar.sha1 b/server/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/server/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/server/licenses/log4j-core-2.20.0.jar.sha1 b/server/licenses/log4j-core-2.20.0.jar.sha1 deleted file mode 100644 index 49c972626563b..0000000000000 --- a/server/licenses/log4j-core-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb2a9a47b1396e00b5eee1264296729a70565cc0 \ No newline at end of file diff --git a/server/licenses/log4j-core-2.21.0.jar.sha1 b/server/licenses/log4j-core-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..c88e6f7a25ca9 --- /dev/null +++ b/server/licenses/log4j-core-2.21.0.jar.sha1 @@ -0,0 +1 @@ +122e1a9e0603cc9eae07b0846a6ff01f2454bc49 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.20.0.jar.sha1 b/server/licenses/log4j-jul-2.20.0.jar.sha1 deleted file mode 100644 index a456651e4569e..0000000000000 --- a/server/licenses/log4j-jul-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8170e6118eac1ab332046c179718a0f107f688e1 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.21.0.jar.sha1 b/server/licenses/log4j-jul-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..480010840abca --- /dev/null +++ b/server/licenses/log4j-jul-2.21.0.jar.sha1 @@ -0,0 +1 @@ +f0da61113f4a47654677e6a98b1e13ca7de2483d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 b/server/licenses/reactive-streams-1.0.4.jar.sha1 similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 rename to server/licenses/reactive-streams-1.0.4.jar.sha1 diff --git a/plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt b/server/licenses/reactive-streams-LICENSE.txt similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt rename to server/licenses/reactive-streams-LICENSE.txt diff --git a/plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactive-streams-NOTICE.txt similarity index 100% rename from plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt rename to server/licenses/reactive-streams-NOTICE.txt diff --git a/server/licenses/reactor-LICENSE.txt b/server/licenses/reactor-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/server/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
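
   As a concrete illustration of the appendix instructions, a Java source
   file carrying hypothetical identifying information (the year "2023" and
   owner "Example Contributor" below are placeholders, not taken from this
   change) would begin:

      /*
       * Copyright 2023 Example Contributor
       *
       * Licensed under the Apache License, Version 2.0 (the "License");
       * you may not use this file except in compliance with the License.
       * You may obtain a copy of the License at
       *
       *     https://www.apache.org/licenses/LICENSE-2.0
       *
       * Unless required by applicable law or agreed to in writing, software
       * distributed under the License is distributed on an "AS IS" BASIS,
       * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
       * See the License for the specific language governing permissions and
       * limitations under the License.
       */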
diff --git a/plugins/repository-azure/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactor-NOTICE.txt
similarity index 100%
rename from plugins/repository-azure/licenses/reactive-streams-NOTICE.txt
rename to server/licenses/reactor-NOTICE.txt
diff --git a/server/licenses/reactor-core-3.5.11.jar.sha1 b/server/licenses/reactor-core-3.5.11.jar.sha1
new file mode 100644
index 0000000000000..e5ffdbc8a7840
--- /dev/null
+++ b/server/licenses/reactor-core-3.5.11.jar.sha1
@@ -0,0 +1 @@
+db2299757f562261eb775d13658e86ff06f91e8a
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
new file mode 100644
index 0000000000000..a081110e6c5a1
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.create;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+import org.opensearch.Version;
+import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.opensearch.action.admin.indices.shrink.ResizeType;
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.index.query.TermsQueryBuilder;
+import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
+import org.opensearch.test.VersionUtils;
+
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase {
+
+    @Override
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
+    }
+
+    public void testCreateCloneIndex() {
+        Version version = VersionUtils.randomIndexCompatibleVersion(random());
+        int numPrimaryShards = randomIntBetween(1, 5);
+        prepareCreate("source").setSettings(
+            Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version)
+        ).get();
+        final int docs = randomIntBetween(0, 128);
+        for (int i = 0; i < docs; i++) {
+            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get();
+        }
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        // ensure all shards are allocated, otherwise the ensure green below might not succeed
+        ensureGreen();
+        // block writes on the source index so it can be cloned
+        client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get();
+        ensureGreen();
+
+        final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get();
+
+        // disable rebalancing to be able to capture the right stats. balancing can move the target primary
+        // making it hard to pinpoint the source shards.
+        client().admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"))
+            .get();
+        try {
+            assertAcked(
+                client().admin()
+                    .indices()
+                    .prepareResizeIndex("source", "target")
+                    .setResizeType(ResizeType.CLONE)
+                    .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build())
+                    .get()
+            );
+            ensureGreen();
+
+            final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get();
+            assertThat(targetStats.getIndex("target").getIndexShards().keySet().size(), equalTo(numPrimaryShards));
+
+            final int size = docs > 0 ?
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java new file mode 100644 index 0000000000000..282eb9c6ad95e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -0,0 +1,545 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.InternalClusterInfoService; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import 
org.opensearch.index.engine.SegmentsStats;
+import org.opensearch.index.query.TermsQueryBuilder;
+import org.opensearch.index.seqno.SeqNoStats;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
+import org.opensearch.test.VersionUtils;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.stream.IntStream;
+
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase {
+    @Override
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
+    }
+
+    public Settings indexSettings() {
+        return Settings.builder()
+            .put(super.indexSettings())
+            .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false)
+            .put(SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .build();
+    }
+
+    public void testCreateShrinkIndexToN() {
+
+        assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS);
+
+        int[][] possibleShardSplits = new int[][] { { 8, 4, 2 }, { 9, 3, 1 }, { 4, 2, 1 }, { 15, 5, 1 } };
+        int[] shardSplits = randomFrom(possibleShardSplits);
+        assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);
+        assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]);
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get();
+        for (int i = 0; i < 20; i++) {
+            client().prepareIndex("source")
+                .setId(Integer.toString(i))
+                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON)
+                .get();
+        }
+        final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
+        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
+        DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
+        String mergeNode = discoveryNodes[0].getName();
+        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
+        // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
+        // to the require._name below.
+        ensureGreen();
+        // relocate all shards to one node such that we can merge it.
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("source")
+            .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true))
+            .get();
+        ensureGreen();
+        // now shrink source down to shardSplits[1] shards
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareResizeIndex("source", "first_shrink")
+                .setSettings(
+                    Settings.builder()
+                        .put("index.number_of_replicas", 0)
+                        .put("index.number_of_shards", shardSplits[1])
+                        .putNull("index.blocks.write")
+                        .build()
+                )
+                .get()
+        );
+        ensureGreen();
+        assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+
+        for (int i = 0; i < 20; i++) { // now update
+            client().prepareIndex("first_shrink")
+                .setId(Integer.toString(i))
+                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON)
+                .get();
+        }
+        flushAndRefresh();
+        assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+        assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+
+        // relocate all shards to one node such that we can merge it.
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("first_shrink")
+            .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true))
+            .get();
+        ensureGreen();
+        // now shrink first_shrink down to shardSplits[2] shards
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareResizeIndex("first_shrink", "second_shrink")
+                .setSettings(
+                    Settings.builder()
+                        .put("index.number_of_replicas", 0)
+                        .put("index.number_of_shards", shardSplits[2])
+                        .putNull("index.blocks.write")
+                        .putNull("index.routing.allocation.require._name")
+                        .build()
+                )
+                .get()
+        );
+        ensureGreen();
+        assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+        // let it be allocated anywhere
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("second_shrink")
+            .setSettings(Settings.builder().putNull("index.routing.allocation.include._id").put("index.number_of_replicas", 0))
+            .get();
+        ensureGreen();
+        assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+
+        for (int i = 0; i < 20; i++) { // now update
+            client().prepareIndex("second_shrink")
+                .setId(Integer.toString(i))
+                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON)
+                .get();
+        }
+        flushAndRefresh();
+        assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+        assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+        assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+    }
+
+    public void testShrinkIndexPrimaryTerm() throws Exception {
+        int numberOfShards = randomIntBetween(2, 20);
+        int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1));
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get();
+
+        final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
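+        // sanity check: the steps below funnel every shard onto a single "merge" node via
+        // require._name, which only works when there is at least one other node to move from
+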
assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); + final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + final String mergeNode = discoveryNodes[0].getName(); + // This needs more than the default timeout if a large number of shards were created. + ensureGreen(TimeValue.timeValueSeconds(120)); + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + // relocate all shards to one node such that we can merge it. + final Settings.Builder prepareShrinkSettings = Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now merge source into target + final Settings shrinkSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + .build(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); + + ensureGreen(TimeValue.timeValueSeconds(120)); + + final IndexMetadata afterShrinkIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(afterShrinkIndexMetadata.primaryTerm(shardId), equalTo(beforeShrinkPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateShrinkIndex() { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("index.version.created", version) + 
).get();
+        final int docs = randomIntBetween(0, 128);
+        for (int i = 0; i < docs; i++) {
+            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get();
+        }
+        final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
+        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
+        DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
+        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
+        // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
+        // to the require._name below.
+        ensureGreen();
+        // relocate all shards to one node such that we can merge it.
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("source")
+            .setSettings(
+                Settings.builder()
+                    .put("index.routing.allocation.require._name", discoveryNodes[0].getName())
+                    .put("index.blocks.write", true)
+            )
+            .get();
+        ensureGreen();
+
+        final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get();
+
+        // disable rebalancing to be able to capture the right stats. balancing can move the target primary
+        // making it hard to pinpoint the source shards.
+        client().admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"))
+            .get();
+
+        // now merge source into a single shard index
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareResizeIndex("source", "target")
+                .setSettings(
+                    Settings.builder()
+                        .put("index.number_of_replicas", 0)
+                        .putNull("index.blocks.write")
+                        .putNull("index.routing.allocation.require._name")
+                        .build()
+                )
+                .get()
+        );
+        ensureGreen();
+
+        // resolve true merge node - this is not always the node we required as all shards may be on another node
+        final ClusterState state = client().admin().cluster().prepareState().get().getState();
+        DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId());
+        logger.info("merge node {}", mergeNode);
+
+        final long maxSeqNo = Arrays.stream(sourceStats.getShards())
+            .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId()))
+            .map(ShardStats::getSeqNoStats)
+            .mapToLong(SeqNoStats::getMaxSeqNo)
+            .max()
+            .getAsLong();
+        final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards())
+            .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId()))
+            .map(ShardStats::getStats)
+            .map(CommonStats::getSegments)
+            .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp)
+            .max()
+            .getAsLong();
+
+        final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get();
+        for (final ShardStats shardStats : targetStats.getShards()) {
+            final SeqNoStats seqNoStats = shardStats.getSeqNoStats();
+            final ShardRouting shardRouting = shardStats.getShardRouting();
+            assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo));
+            assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo));
+            assertThat(
+                "failed on " + shardRouting,
+                shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(),
+                equalTo(maxUnsafeAutoIdTimestamp)
+            );
+        }
+
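+        // search size: twice the doc count so the same window also covers the corpus after the
+        // follow-up writes below double it, and at least 1 so the request never asks for size 0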
+        final int size = docs > 0 ? 2 * docs : 1;
+        assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs);
+
+        for (int i = docs; i < 2 * docs; i++) {
+            client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get();
+        }
+        flushAndRefresh();
+        assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 2 * docs);
+        assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs);
+        GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get();
+        assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
+
+        // clean up
+        client().admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(
+                Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null)
+            )
+            .get();
+    }
+
+    /**
+     * Tests that we can manually recover from a failed allocation due to shards being moved away etc.
+     */
+    public void testCreateShrinkIndexFails() throws Exception {
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        prepareCreate("source").setSettings(
+            Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0)
+        ).get();
+        for (int i = 0; i < 20; i++) {
+            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get();
+        }
+        final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
+        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
+        DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
+        String spareNode = discoveryNodes[0].getName();
+        String mergeNode = discoveryNodes[1].getName();
+        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
+        // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
+        // to the require._name below.
+        ensureGreen();
+        // relocate all shards to one node such that we can merge it.
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("source")
+            .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true))
+            .get();
+        ensureGreen();
+
+        // now merge source into a single shard index
+        client().admin()
+            .indices()
+            .prepareResizeIndex("source", "target")
+            .setWaitForActiveShards(ActiveShardCount.NONE)
+            .setSettings(
+                Settings.builder()
+                    .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to force the allocation to fail
+                    .put("index.number_of_replicas", 0)
+                    .put("index.allocation.max_retries", 1)
+                    .build()
+            )
+            .get();
+        client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get();
+
+        // now we move all shards away from the merge node
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("source")
+            .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode).put("index.blocks.write", true))
+            .get();
+        ensureGreen("source");
+
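+        // removing the exclusion below does not by itself reallocate the target shard: the retry
+        // budget (index.allocation.max_retries: 1) is already exhausted, so the shard stays
+        // unassigned until the explicit retry further down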
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("target") // remove the forced allocation failure
+            .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name"))
+            .get();
+        // wait until it fails
+        assertBusy(() -> {
+            ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+            RoutingTable routingTables = clusterStateResponse.getState().routingTable();
+            assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
+            assertEquals(
+                UnassignedInfo.Reason.ALLOCATION_FAILED,
+                routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason()
+            );
+            assertEquals(1, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
+        });
+        client().admin()
+            .indices()
+            .prepareUpdateSettings("source") // now relocate them all to the right node
+            .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode))
+            .get();
+        ensureGreen("source");
+
+        final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(
+            ClusterInfoService.class,
+            internalCluster().getClusterManagerName()
+        );
+        infoService.refresh();
+        // kick off a retry and wait until it's done!
+        ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+        long expectedShardSize = clusterRerouteResponse.getState()
+            .routingTable()
+            .index("target")
+            .shard(0)
+            .getShards()
+            .get(0)
+            .getExpectedShardSize();
+        // we support the expected shard size in the allocator to sum up over the source index shards
+        assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
+        ensureGreen();
+        assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+    }
+
+    public void testCreateShrinkWithIndexSort() throws Exception {
+        SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX);
+        expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST);
+        Sort expectedIndexSort = new Sort(expectedSortField);
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        prepareCreate("source").setSettings(
+            Settings.builder()
+                .put(indexSettings())
+                .put("sort.field", "id")
+                .put("sort.order", "desc")
+                .put("number_of_shards", 8)
+                .put("number_of_replicas", 0)
+        ).setMapping("id", "type=keyword,doc_values=true").get();
+        for (int i = 0; i < 20; i++) {
+            client().prepareIndex("source")
+                .setId(Integer.toString(i))
+                .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", MediaTypeRegistry.JSON)
+                .get();
+        }
+        final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
+        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
+        DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
+        String mergeNode = discoveryNodes[0].getName();
+        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
+        // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
+        // to the require._name below.
+        ensureGreen();
+
+        flushAndRefresh();
+        assertSortedSegments("source", expectedIndexSort);
+
+        // relocate all shards to one node such that we can merge it.
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // check that index sort cannot be set on the target index + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .put("index.sort.field", "foo") + .build() + ) + .get() + ); + assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); + + // check that the index sort order of `source` is correctly applied to the `target` + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + flushAndRefresh(); + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("target").execute().actionGet(); + assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); + assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); + assertSortedSegments("target", expectedIndexSort); + + // ... and that the index sort is also applied to updates + for (int i = 20; i < 40; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertSortedSegments("target", expectedIndexSort); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java new file mode 100644 index 0000000000000..dd4252d24f314 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -0,0 +1,506 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateSplitIndexToN() throws IOException { + int[][] possibleShardSplits = new int[][] { { 2, 4, 8 }, { 3, 6, 12 }, { 1, 2, 4 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + splitToN(shardSplits[0], shardSplits[1], shardSplits[2]); + } 
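+
+    // splitToN performs two successive splits; a split may only grow the shard count by a
+    // whole factor, which every (source, first, second) triple used here satisfies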
+    public void testSplitFromOneToN() {
+
+        assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS);
+
+        splitToN(1, 5, 10);
+        client().admin().indices().prepareDelete("*").get();
+        int randomSplit = randomIntBetween(2, 6);
+        splitToN(1, randomSplit, randomSplit * 2);
+    }
+
+    private void splitToN(int sourceShards, int firstSplitShards, int secondSplitShards) {
+
+        assertEquals(sourceShards, (sourceShards * firstSplitShards) / firstSplitShards);
+        assertEquals(firstSplitShards, (firstSplitShards * secondSplitShards) / secondSplitShards);
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        final boolean useRouting = randomBoolean();
+        final boolean useNested = randomBoolean();
+        final boolean useMixedRouting = useRouting ? randomBoolean() : false;
+        CreateIndexRequestBuilder createInitialIndex = prepareCreate("source");
+        Settings.Builder settings = Settings.builder().put(indexSettings()).put("number_of_shards", sourceShards);
+        final boolean useRoutingPartition;
+        if (randomBoolean()) {
+            // randomly set the value manually
+            int routingShards = secondSplitShards * randomIntBetween(1, 10);
+            settings.put("index.number_of_routing_shards", routingShards);
+            useRoutingPartition = false;
+        } else {
+            useRoutingPartition = randomBoolean();
+        }
+        if (useRouting && useMixedRouting == false && useRoutingPartition) {
+            int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(secondSplitShards, Version.CURRENT) - 1;
+            settings.put("index.routing_partition_size", randomIntBetween(1, numRoutingShards));
+            if (useNested) {
+                createInitialIndex.setMapping("_routing", "required=true", "nested1", "type=nested");
+            } else {
+                createInitialIndex.setMapping("_routing", "required=true");
+            }
+        } else if (useNested) {
+            createInitialIndex.setMapping("nested1", "type=nested");
+        }
+        logger.info("use routing {} use mixed routing {} use nested {}", useRouting, useMixedRouting, useNested);
+        createInitialIndex.setSettings(settings).get();
+
+        int numDocs = randomIntBetween(10, 50);
+        String[] routingValue = new String[numDocs];
+
+        BiFunction<String, Integer, IndexRequestBuilder> indexFunc = (index, id) -> {
+            try {
+                return client().prepareIndex(index)
+                    .setId(Integer.toString(id))
+                    .setSource(
+                        jsonBuilder().startObject()
+                            .field("foo", "bar")
+                            .field("i", id)
+                            .startArray("nested1")
+                            .startObject()
+                            .field("n_field1", "n_value1_1")
+                            .field("n_field2", "n_value2_1")
+                            .endObject()
+                            .startObject()
+                            .field("n_field1", "n_value1_2")
+                            .field("n_field2", "n_value2_2")
+                            .endObject()
+                            .endArray()
+                            .endObject()
+                    );
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        };
+        for (int i = 0; i < numDocs; i++) {
+            IndexRequestBuilder builder = indexFunc.apply("source", i);
+            if (useRouting) {
+                String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
+                if (useMixedRouting && randomBoolean()) {
+                    routingValue[i] = null;
+                } else {
+                    routingValue[i] = routing;
+                }
+                builder.setRouting(routingValue[i]);
+            }
+            builder.get();
+        }
+
+        if (randomBoolean()) {
+            for (int i = 0; i < numDocs; i++) { // let's introduce some updates / deletes on the index
+                if (randomBoolean()) {
+                    IndexRequestBuilder builder = indexFunc.apply("source", i);
+                    if (useRouting) {
+                        builder.setRouting(routingValue[i]);
+                    }
+                    builder.get();
+                }
+            }
+        }
+
+        ensureYellow();
+        client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get();
+        ensureGreen();
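+        // how far the index can be split is bounded by index.number_of_routing_shards, which the
+        // random branch above sometimes pins explicitly at creation time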
.put("index.number_of_replicas", 0) + .put("index.number_of_shards", firstSplitShards) + .putNull("index.blocks.write"); + if (sourceShards == 1 && useRoutingPartition == false && randomBoolean()) { // try to set it if we have a source index with 1 shard + firstSplitSettingsBuilder.put("index.number_of_routing_shards", secondSplitShards); + } + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_split") + .setResizeType(ResizeType.SPLIT) + .setSettings(firstSplitSettingsBuilder.build()) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("first_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("first_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + + client().admin() + .indices() + .prepareUpdateSettings("first_split") + .setSettings(Settings.builder().put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now split source into a new index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_split", "second_split") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", secondSplitShards) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + // let it be allocated anywhere and bump replicas + client().admin() + .indices() + .prepareUpdateSettings("second_split") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("second_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + if (useNested) { + assertNested("source", numDocs); + assertNested("first_split", numDocs); + assertNested("second_split", numDocs); + } + assertAllUniqueDocs( + client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs( + 
client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + } + + public void assertNested(String index, int numDocs) { + // now, do a nested query + SearchResponse searchResponse = client().prepareSearch(index) + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) + .get(); + assertNoFailures(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); + } + + public void assertAllUniqueDocs(SearchResponse response, int numDocs) { + Set ids = new HashSet<>(); + for (int i = 0; i < response.getHits().getHits().length; i++) { + String id = response.getHits().getHits()[i].getId(); + assertTrue("found ID " + id + " more than once", ids.add(id)); + } + assertEquals(numDocs, ids.size()); + } + + public void testSplitIndexPrimaryTerm() throws Exception { + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("number_of_shards", numberOfShards) + .put("index.number_of_routing_shards", numberOfTargetShards) + ).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + final Settings.Builder prepareSplitSettings = Settings.builder().put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareSplitSettings).get(); + ensureYellow(); + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeSplitPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now split source into target + final Settings splitSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + 
.putNull("index.blocks.write") + .build(); + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(splitSettings) + .get() + ); + + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata aftersplitIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(aftersplitIndexMetadata.primaryTerm(shardId), equalTo(beforeSplitPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateSplitIndex() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. 
+ client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 2) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("split node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 50ff76c6b62f3..82ab5b0118c0e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -37,6 +37,7 @@ public AcknowledgedResponse createDataStream(String name) throws Exception { CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name); AcknowledgedResponse response = client().admin().indices().createDataStream(request).get(); assertThat(response.isAcknowledged(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -67,6 +68,7 @@ public RolloverResponse rolloverDataStream(String name) throws Exception { RolloverResponse response = client().admin().indices().rolloverIndex(request).get(); assertThat(response.isAcknowledged(), is(true)); assertThat(response.isRolledOver(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -109,5 +111,4 @@ public AcknowledgedResponse deleteIndexTemplate(String name) throws Exception { assertThat(response.isAcknowledged(), is(true)); return response; } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 1463c45aa9b2f..79f6ba6dfa642 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -199,6 +200,8 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc } } + ClusterStateStats clusterStateStats = internalCluster().clusterService().getClusterManagerService().getClusterStateStats(); + assertTrue(clusterStateStats.getUpdateFailed() > 0); }); } diff 
--git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 6fcc89cfe9e9a..dfde1b958882c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -8,9 +8,12 @@ package org.opensearch.gateway.remote; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.DiscoveryStats; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -19,6 +22,7 @@ import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Map; +import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; @@ -86,14 +90,105 @@ public void testFullClusterRestoreStaleDelete() throws Exception { assertEquals(10, repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); - Map indexMetadataMap = remoteClusterStateService.getLatestIndexMetadata( + Map indexMetadataMap = remoteClusterStateService.getLatestClusterState( cluster().getClusterName(), getClusterState().metadata().clusterUUID() - ); + ).getMetadata().getIndices(); assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); } + public void testRemoteStateStats() { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String clusterManagerNode = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().getDataNodeNames().stream().collect(Collectors.toList()).get(0); + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(clusterManagerNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + + // assert cluster state stats + assertClusterManagerClusterStateStats(nodesStatsResponse); + + NodesStatsResponse nodesStatsResponseDataNode = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + // assert cluster state stats for data node + DiscoveryStats dataNodeDiscoveryStats = nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); + assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); + assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + + // call nodes/stats with nodeId filter + NodesStatsResponse nodesStatsNodeIdFilterResponse = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + 
.setNodesIds(clusterManagerNode) + .get(); + + assertClusterManagerClusterStateStats(nodesStatsNodeIdFilterResponse); + } + + private void assertClusterManagerClusterStateStats(NodesStatsResponse nodesStatsResponse) { + // assert cluster state stats + DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); + + assertNotNull(discoveryStats.getClusterStateStats()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateSuccess() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getUpdateFailed()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() > 0); + // assert remote state stats + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getSuccessCount() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getFailedCount()); + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getTotalTimeInMillis() > 0); + } + + public void testRemoteStateStatsFromAllNodes() { + int shardCount = randomIntBetween(1, 5); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String[] allNodes = internalCluster().getNodeNames(); + // call _nodes/stats/discovery from all the nodes + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + + // call _nodes/stats/discovery from all the nodes with random nodeId filter + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(allNodes[randomIntBetween(0, allNodes.length - 1)]) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + } + + private void validateNodesStatsResponse(NodesStatsResponse nodesStatsResponse) { + // _nodes/stats/discovery must never fail due to any exception + assertFalse(nodesStatsResponse.toString().contains("exception")); + assertNotNull(nodesStatsResponse.getNodes()); + assertNotNull(nodesStatsResponse.getNodes().get(0)); + assertNotNull(nodesStatsResponse.getNodes().get(0).getDiscoveryStats()); + } + private void setReplicaCount(int replicaCount) { client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 98a22717019cf..848f6eddbb0df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -539,7 +539,7 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 4); } - public void testCacheWithFilteredAlias() { + public void testCacheWithFilteredAlias() throws InterruptedException { Client client = client(); Settings settings = Settings.builder() .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) @@ -562,6 +562,8 @@ public void testCacheWithFilteredAlias() { OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); + indexRandomForConcurrentSearch("index"); + assertCacheState(client, "index", 0, 0); 
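[Editor's note: illustrative sketch, not part of this patch.] The assertCacheState(client, "index", hits, misses) calls in this hunk compare the index's request-cache counters against expected values. A minimal sketch of what such a helper can look like, assuming it reads the counters through the indices stats API (the name assertCacheStateSketch and the exact assertions below are this note's own, not necessarily the repository's):

    // Assumes org.opensearch.client.Client and
    // org.opensearch.index.cache.request.RequestCacheStats are imported.
    private static void assertCacheStateSketch(Client client, String index, long expectedHits, long expectedMisses) {
        RequestCacheStats stats = client.admin()
            .indices()
            .prepareStats(index)
            .setRequestCache(true)
            .get()
            .getTotal()
            .getRequestCache();
        // A cacheable search that finds no entry counts a miss and stores the result;
        // an identical follow-up request should then count a hit.
        assertEquals(expectedMisses, stats.getMissCount());
        assertEquals(expectedHits, stats.getHitCount());
    }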
SearchResponse r1 = client.prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java new file mode 100644 index 0000000000000..66b26b5d25cfe --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * These tests simulate corruption cases during replication. They are skipped on WindowsFS simulation where file renaming + * can fail with an access denied IOException because deletion is not permitted. 
+ */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class SegmentReplicationDisruptionIT extends SegmentReplicationBaseIT { + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + public void testSendCorruptBytesToReplica() throws Exception { + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean failed = new AtomicBoolean(false); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { + FileChunkRequest req = (FileChunkRequest) request; + TransportRequest corrupt = new FileChunkRequest( + req.recoveryId(), + ((FileChunkRequest) request).requestSeqNo(), + ((FileChunkRequest) request).shardId(), + ((FileChunkRequest) request).metadata(), + ((FileChunkRequest) request).position(), + new BytesArray("test"), + false, + 0, + 0L + ); + connection.sendRequest(requestId, action, corrupt, options); + latch.countDown(); + } else { + connection.sendRequest(requestId, action, request, options); + } + } + ); + for (int i = 0; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + assertNotEquals(originalRecoveryTime, 0); + refresh(INDEX_NAME); + latch.await(); + assertTrue(failed.get()); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + // reset checkIndex to ensure our original shard doesn't throw + resetCheckIndexStatus(); + waitForSearchableDocs(100, primaryNode, replicaNode); + } + + public void testWipeSegmentBetweenSyncs() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); + indexShard.store().directory().deleteFile("_0.si"); + + for (int i = 11; i < 21; i++) { + client().prepareIndex(INDEX_NAME) + 
.setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + resetCheckIndexStatus(); + waitForSearchableDocs(20, primaryNode, replicaNode); + } + + private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { + assertBusy(() -> { + // assert we have a peer recovery after the original + final long time = getRecoveryStopTime(replicaNode); + assertNotEquals(time, 0); + assertNotEquals(originalRecoveryTime, time); + + }, 1, TimeUnit.MINUTES); + } + + private long getRecoveryStopTime(String nodeName) { + final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); + final List recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); + for (RecoveryState recoveryState : recoveryStates) { + if (recoveryState.getTargetNode().getName().equals(nodeName)) { + return recoveryState.getTimer().stopTime(); + } + } + return 0L; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 81556cc270151..5511bc7945d65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -22,9 +22,9 @@ import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.get.GetResponse; @@ -59,11 +59,11 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -73,7 +73,6 @@ import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.recovery.FileChunkRequest; -import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.NodeClosedException; @@ -84,8 +83,8 @@ import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; -import 
org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.junit.Before; @@ -98,7 +97,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -240,7 +239,7 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(0, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -257,7 +256,7 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -276,7 +275,7 @@ public void testIndexReopenClose() throws Exception { createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(100, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -311,7 +310,7 @@ public void testScrollWithConcurrentIndexAndSearch() throws Exception { ensureGreen(INDEX_NAME); final List> pendingIndexResponses = new ArrayList<>(); final List> pendingSearchResponse = new ArrayList<>(); - final int searchCount = randomIntBetween(10, 20); + final int searchCount = randomIntBetween(1, 2); final WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); for (int i = 0; i < searchCount; i++) { @@ -356,6 +355,7 @@ public void testScrollWithConcurrentIndexAndSearch() throws Exception { waitForSearchableDocs(INDEX_NAME, 2 * searchCount, List.of(primary, replica)); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testMultipleShards() throws Exception { Settings indexSettings = Settings.builder() .put(super.indexSettings()) @@ -369,7 +369,7 @@ public void testMultipleShards() throws Exception { createIndex(INDEX_NAME, indexSettings); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(1, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -386,7 +386,7 @@ public void testMultipleShards() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -405,8 +405,8 @@ public void testReplicationAfterForceMerge() throws Exception { createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(0, 10); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final 
int expectedHitCount = initialDocCount + additionalDocCount; try ( BackgroundIndexer indexer = new BackgroundIndexer( @@ -511,7 +511,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { connection.sendRequest(requestId, action, request, options); } ); - final int docCount = scaledRandomIntBetween(10, 200); + final int docCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < docCount; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } @@ -572,7 +572,7 @@ public void testCancellation() throws Exception { } ); - final int docCount = scaledRandomIntBetween(0, 200); + final int docCount = scaledRandomIntBetween(0, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -632,13 +632,14 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { verifyStoreContent(); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testDeleteOperations() throws Exception { final String nodeA = internalCluster().startDataOnlyNode(); final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(1, 20); + final int initialDocCount = scaledRandomIntBetween(1, 5); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -655,7 +656,7 @@ public void testDeleteOperations() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 20); + final int additionalDocCount = scaledRandomIntBetween(0, 2); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -684,14 +685,14 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { createIndex(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(10, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < initialDocCount; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); } refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, primary, replica); - final int deletedDocCount = randomIntBetween(10, initialDocCount); + final int deletedDocCount = randomIntBetween(1, initialDocCount); for (int i = 0; i < deletedDocCount; i++) { client(primary).prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); } @@ -712,7 +713,7 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { ); // add some docs to the xlog and drop primary. 
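[Editor's note: illustrative sketch, not part of this patch.] "xlog" in the comment above means the translog: the documents indexed at this point are neither refreshed nor flushed, so after the primary drops they exist only as translog operations that the promoted replica has to replay. A hedged way to observe that through the public stats API, with a hypothetical helper name:

    private long uncommittedTranslogOps(String index) {
        // Uncommitted operations are those not yet covered by a Lucene commit (flush),
        // i.e. the ones that would need replay after a primary failure.
        return client().admin()
            .indices()
            .prepareStats(index)
            .setTranslog(true)
            .get()
            .getPrimaries()
            .getTranslog()
            .getUncommittedOperations();
    }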
- final int additionalDocs = randomIntBetween(1, 50); + final int additionalDocs = randomIntBetween(1, 5); for (int i = initialDocCount; i < initialDocCount + additionalDocs; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); } @@ -743,7 +744,7 @@ public void testUpdateOperations() throws Exception { final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(1, 5); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -760,7 +761,7 @@ public void testUpdateOperations() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, asList(primary, replica)); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 5); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -795,7 +796,7 @@ public void testDropPrimaryDuringReplication() throws Exception { final List dataNodes = internalCluster().startDataOnlyNodes(6); ensureGreen(INDEX_NAME); - int initialDocCount = scaledRandomIntBetween(100, 200); + int initialDocCount = scaledRandomIntBetween(5, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -829,6 +830,7 @@ public void testDropPrimaryDuringReplication() throws Exception { } } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testReplicaHasDiffFilesThanPrimary() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); @@ -840,7 +842,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.APPEND); // create a doc to index - int numDocs = 2 + random().nextInt(100); + int numDocs = 2 + random().nextInt(10); List docs = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { @@ -869,7 +871,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { replicaShard.finalizeReplication(segmentInfos); ensureYellow(INDEX_NAME); - final int docCount = scaledRandomIntBetween(10, 200); + final int docCount = scaledRandomIntBetween(10, 20); for (int i = 0; i < docCount; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); // Refresh, this should trigger round of segment replication @@ -889,7 +891,7 @@ public void testPressureServiceStats() throws Exception { final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - int initialDocCount = scaledRandomIntBetween(100, 200); + int initialDocCount = scaledRandomIntBetween(10, 20); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -995,8 +997,8 @@ public void testScrollCreatedOnReplica() throws Exception { final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - // index 100 docs - for (int i = 0; i < 100; i++) { + // index 10 docs + for (int i = 0; i < 10; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource(jsonBuilder().startObject().field("field", i).endObject()) @@ -1032,7 +1034,7 @@ public void testScrollCreatedOnReplica() throws 
Exception { // force call flush flush(INDEX_NAME); - for (int i = 3; i < 50; i++) { + for (int i = 3; i < 5; i++) { client().prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); refresh(INDEX_NAME); if (randomBoolean()) { @@ -1068,9 +1070,14 @@ public void testScrollCreatedOnReplica() throws Exception { client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up post scroll clear request", currentFiles.containsAll(snapshottedSegments)); - assertEquals(100, scrollHits); + assertBusy( + () -> assertFalse( + "Files should be cleaned up post scroll clear request", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); + assertEquals(10, scrollHits); + } /** @@ -1215,19 +1222,8 @@ public void testPitCreatedOnReplica() throws Exception { ensureYellowAndNoInitializingShards(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - client().prepareIndex(INDEX_NAME) - .setId("1") - .setSource("foo", randomInt()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - refresh(INDEX_NAME); - client().prepareIndex(INDEX_NAME) - .setId("2") - .setSource("foo", randomInt()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - for (int i = 3; i < 100; i++) { + for (int i = 0; i < 10; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource("foo", randomInt()) @@ -1277,7 +1273,7 @@ public void testPitCreatedOnReplica() throws Exception { } flush(INDEX_NAME); - for (int i = 101; i < 200; i++) { + for (int i = 11; i < 20; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource("foo", randomInt()) @@ -1329,9 +1325,12 @@ public void testPitCreatedOnReplica() throws Exception { // delete the PIT DeletePitRequest deletePITRequest = new DeletePitRequest(pitResponse.getId()); client().execute(DeletePitAction.INSTANCE, deletePITRequest).actionGet(); - - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments)); + assertBusy( + () -> assertFalse( + "Files should be cleaned up", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); } /** @@ -1782,134 +1781,57 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { } - public void testSendCorruptBytesToReplica() throws Exception { - // this test stubs transport calls specific to node-node replication. 
- assumeFalse( - "Skipping the test as its not compatible with segment replication with remote store.", - segmentReplicationWithRemoteEnabled() - ); + public void testReplicaAlreadyAtCheckpoint() throws Exception { + final List nodes = new ArrayList<>(); final String primaryNode = internalCluster().startDataOnlyNode(); - createIndex( - INDEX_NAME, - Settings.builder() - .put(indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.refresh_interval", -1) - .build() - ); - ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataOnlyNode(); + nodes.add(primaryNode); + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); - - MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( - TransportService.class, - primaryNode - )); - CountDownLatch latch = new CountDownLatch(1); - AtomicBoolean failed = new AtomicBoolean(false); - primaryTransportService.addSendBehavior( - internalCluster().getInstance(TransportService.class, replicaNode), - (connection, requestId, action, request, options) -> { - if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { - FileChunkRequest req = (FileChunkRequest) request; - logger.info("SENDING CORRUPT file chunk [{}] lastChunk: {}", req, req.lastChunk()); - TransportRequest corrupt = new FileChunkRequest( - req.recoveryId(), - ((FileChunkRequest) request).requestSeqNo(), - ((FileChunkRequest) request).shardId(), - ((FileChunkRequest) request).metadata(), - ((FileChunkRequest) request).position(), - new BytesArray("test"), - false, - 0, - 0L - ); - connection.sendRequest(requestId, action, corrupt, options); - latch.countDown(); - } else { - connection.sendRequest(requestId, action, request, options); - } - } - ); - for (int i = 0; i < 100; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); - } - final long originalRecoveryTime = getRecoveryStopTime(replicaNode); - assertNotEquals(originalRecoveryTime, 0); - refresh(INDEX_NAME); - latch.await(); - assertTrue(failed.get()); - waitForNewPeerRecovery(replicaNode, originalRecoveryTime); - // reset checkIndex to ensure our original shard doesn't throw - resetCheckIndexStatus(); - waitForSearchableDocs(100, primaryNode, replicaNode); - } - - public void testWipeSegmentBetweenSyncs() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String primaryNode = internalCluster().startDataOnlyNode(); - createIndex( - INDEX_NAME, - Settings.builder() - .put(indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.refresh_interval", -1) - .build() - ); - ensureYellow(INDEX_NAME); + // start a replica node, initially will be empty with no shard assignment. 
final String replicaNode = internalCluster().startDataOnlyNode(); + nodes.add(replicaNode); + final String replicaNode2 = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + ); ensureGreen(INDEX_NAME); - for (int i = 0; i < 10; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); - } + // index a doc. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); refresh(INDEX_NAME); - ensureGreen(INDEX_NAME); - final long originalRecoveryTime = getRecoveryStopTime(replicaNode); - final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); - waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); - indexShard.store().directory().deleteFile("_0.si"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + IndexShard replica_1 = getIndexShard(replicaNode, INDEX_NAME); + IndexShard replica_2 = getIndexShard(replicaNode2, INDEX_NAME); + // wait until a replica is promoted & finishes engine flip, we don't care which one + AtomicReference primary = new AtomicReference<>(); + assertBusy(() -> { + assertTrue("replica should be promoted as a primary", replica_1.routingEntry().primary() || replica_2.routingEntry().primary()); + primary.set(replica_1.routingEntry().primary() ? replica_1 : replica_2); + }); - for (int i = 11; i < 21; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); - } - refresh(INDEX_NAME); - waitForNewPeerRecovery(replicaNode, originalRecoveryTime); - resetCheckIndexStatus(); - waitForSearchableDocs(20, primaryNode, replicaNode); - } + FlushRequest request = new FlushRequest(INDEX_NAME); + request.force(true); + primary.get().flush(request); - private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { assertBusy(() -> { - // assert we have a peer recovery after the original - final long time = getRecoveryStopTime(replicaNode); - assertNotEquals(time, 0); - assertNotEquals(originalRecoveryTime, time); - - }, 1, TimeUnit.MINUTES); - } + assertEquals( + replica_1.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica_2.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); - private long getRecoveryStopTime(String nodeName) { - final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); - final List recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); - logger.info("Recovery states {}", recoveryResponse); - for (RecoveryState recoveryState : recoveryStates) { - if (recoveryState.getTargetNode().getName().equals(nodeName)) { - return recoveryState.getTimer().stopTime(); - } - } - return 0L; + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index dd832a63d1e66..dbe0b43441f54 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -26,6 +26,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -55,6 +56,7 @@ private void createIndex(int replicaCount) { * This test verifies the happy path when the primary shard is relocated to a newly added node (target) in the cluster. Documents * are indexed and verified both before and after the relocation. */ + @TestLogging(reason = "Getting trace logs from replication,shard and allocation package", value = "org.opensearch.indices.replication:TRACE, org.opensearch.index.shard:TRACE, org.opensearch.cluster.routing.allocation:TRACE") public void testPrimaryRelocation() throws Exception { final String oldPrimary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java index 800704eae7fa7..8c045c1560dd3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -15,6 +16,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9499") @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class SegmentReplicationSuiteIT extends SegmentReplicationBaseIT { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 0967acb37d3e8..9c96d4861d426 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -169,7 +169,7 @@ private Settings.Builder settingsBuilder() { return Settings.builder().put(indexSettings()); } - public void testFieldDataStats() { + public void testFieldDataStats() throws InterruptedException { assertAcked( client().admin() .indices() @@ -182,6 +182,7 @@ public void testFieldDataStats() { client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet();
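[Editor's note: illustrative sketch, not part of this patch.] This hunk, like several others in the patch, threads indexRandomForConcurrentSearch(...) into tests before stats are asserted; the helper comes from the shared integration-test base class. Its exact behavior is assumed here rather than shown; one plausible stand-in, under the assumption that it creates extra segments without changing visible hit counts (hypothetical name and body):

    private void indexAndDeleteDummyDocs(String index) throws InterruptedException {
        // Assumption: indexing and then deleting a few throwaway documents leaves
        // additional segments behind, giving the concurrent segment search path
        // multiple slices to work over while query results stay unchanged.
        int dummyDocs = randomIntBetween(1, 4);
        for (int i = 0; i < dummyDocs; i++) {
            client().prepareIndex(index).setId("dummy-" + i).setSource("d", i).get();
        }
        refresh(index);
        for (int i = 0; i < dummyDocs; i++) {
            client().prepareDelete(index, "dummy-" + i).get();
        }
        refresh(index);
    }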
client().admin().indices().prepareRefresh().execute().actionGet(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( @@ -305,6 +306,7 @@ public void testClearAllCaches() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); client().admin().indices().prepareRefresh().execute().actionGet(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 2053800504c89..8166c0008ed83 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -114,6 +114,10 @@ protected void cleanupRepo() { } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + return setup(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure, 0); + } + + protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure, int replicaCount) { // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store result in // IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensure that the // repository creation can happen without failure. @@ -128,6 +132,7 @@ protected String setup(Path repoLocation, double ioFailureRate, String skipExcep internalCluster().startClusterManagerOnlyNode(settings.build()); String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); + internalCluster().startDataOnlyNodes(replicaCount, settings.build()); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index ad3e99dd274ce..99c5d7fb2bae7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -46,7 +46,10 @@ protected Collection> nodePlugins() { } protected void restore(String... indices) { - boolean restoreAllShards = randomBoolean(); + restore(randomBoolean(), indices); + } + + protected void restore(boolean restoreAllShards, String... indices) { if (restoreAllShards) { assertAcked(client().admin().indices().prepareClose(indices)); } @@ -58,7 +61,7 @@ protected void restore(String...
indices) { ); } - protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + protected void verifyRestoredData(Map indexStats, String indexName, boolean indexMoreData) throws Exception { ensureYellowAndNoInitializingShards(indexName); ensureGreen(indexName); // This is to ensure that shards that were already assigned will get latest count @@ -68,6 +71,8 @@ protected void verifyRestoredData(Map indexStats, String indexName 30, TimeUnit.SECONDS ); + if (indexMoreData == false) return; + IndexResponse response = indexSingleDoc(indexName); if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); @@ -80,6 +85,10 @@ protected void verifyRestoredData(Map indexStats, String indexName ); } + protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + verifyRestoredData(indexStats, indexName, true); + } + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 865b2d13f189e..21ce4be9981fb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -11,6 +11,7 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.delete.DeleteResponse; @@ -20,8 +21,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.Index; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotInfo; @@ -32,11 +38,15 @@ import org.junit.Before; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; @@ -320,6 +330,8 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); 
assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); ensureGreen(indexName1, restoredIndexName2); + + assertRemoteSegmentsAndTranslogUploaded(restoredIndexName2); assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); // indexing some new docs and validating @@ -345,6 +357,97 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 4); } + void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { + String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); + + Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); + Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); + Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); + Path segmentDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data"); + + try ( + Stream translogMetadata = Files.list(remoteTranslogMetadataPath); + Stream translogData = Files.list(remoteTranslogDataPath); + Stream segmentMetadata = Files.list(segmentMetadataPath); + Stream segmentData = Files.list(segmentDataPath); + + ) { + assertTrue(translogData.count() > 0); + assertTrue(translogMetadata.count() > 0); + assertTrue(segmentMetadata.count() > 0); + assertTrue(segmentData.count() > 0); + } + + } + + public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + + assertRemoteSegmentsAndTranslogUploaded(indexName1); + + // Clear the local data before stopping the node. This will make sure that remote translog is empty. 
+ IndexShard indexShard = getIndexShard(primaryNodeName(indexName1), indexName1); + try (Stream files = Files.list(indexShard.shardPath().resolveTranslog())) { + IOUtils.deleteFilesIgnoringExceptions(files.collect(Collectors.toList())); + } + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1))); + + ensureRed(indexName1); + + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(false), PlainActionFuture.newFuture()); + + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + + protected IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); @@ -486,4 +589,71 @@ public void testRestoreShallowSnapshotRepository() throws ExecutionException, In assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } + public void testRestoreShallowSnapshotIndexAfterSnapshot() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(List.of(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + int extraNumDocsInIndex1 = randomIntBetween(20, 50); + indexDocuments(client, 
indexName1, extraNumDocsInIndex1); + refresh(indexName1); + + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index 2c6db6ae19a9a..6b6a96dc42a84 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -11,9 +11,9 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -21,6 +21,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; +import org.opensearch.index.IndexServiceTestUtils; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; @@ -55,7 +56,7 @@ public void testWritesRejectedDueToBytesLagBreach() throws Exception { public void testWritesRejectedDueToTimeLagBreach() throws Exception { // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. 
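[Editor's note: illustrative sketch, not part of this patch.] When the time-lag limit described in the comment above is breached, remote-store backpressure starts rejecting writes; validateBackpressure(...) below is what actually drives and asserts that. A hedged stand-alone probe of the same behavior (helper name hypothetical; OpenSearchRejectedExecutionException, BytesReference, and MediaTypeRegistry.JSON are already imported by this test class):

    private void assertWriteRejectedUnderBackpressure(BytesReference source) {
        // Under segment-upload backpressure the indexing path is expected to
        // reject the write rather than accept it.
        OpenSearchRejectedExecutionException e = assertThrows(
            OpenSearchRejectedExecutionException.class,
            () -> client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get()
        );
        assertNotNull(e.getMessage()); // exact wording intentionally not asserted
    }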
- validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag"); + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag"); } private void validateBackpressure( @@ -132,11 +133,13 @@ private RemoteSegmentTransferTracker.Stats stats() { return matches.get(0).getSegmentStats(); } - private void indexDocAndRefresh(BytesReference source, int iterations) { + private void indexDocAndRefresh(BytesReference source, int iterations) throws InterruptedException { for (int i = 0; i < iterations; i++) { client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); refresh(INDEX_NAME); } + Thread.sleep(250); + client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); } /** @@ -172,7 +175,7 @@ public void testAsyncTrimTaskSucceeds() { logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing"); IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); - ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(TimeValue.timeValueMillis(100)); + IndexServiceTestUtils.setTrimTranslogTaskInterval(indexService, TimeValue.timeValueMillis(100)); logger.info("--> Indexing data"); indexData(randomIntBetween(2, 5), true); @@ -228,4 +231,32 @@ public void testSkipLoadGlobalCheckpointToReplicationTracker() { client().admin().cluster().prepareReroute().setRetryFailed(true).get(); ensureGreen(INDEX_NAME); } + + public void testFlushDuringRemoteUploadFailures() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + ensureGreen(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc()); + assertEquals("Failed execution", ex.getMessage()); + + FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getFailedShards()); + ensureGreen(INDEX_NAME); + + logger.info("--> Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getSuccessfulShards()); + assertEquals(0, flushResponse.getFailedShards()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index e2ef5f85abc74..8b4981a15433a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -56,7 +56,7 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; 
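[Editor's note: illustrative sketch, not part of this patch.] testFlushDuringRemoteUploadFailures above flips MockRepository#setRandomControlIOExceptionRate by hand: 1.0 makes every remote-store interaction fail, 0.0 restores normal behavior. Wrapping that toggle in a try/finally helper (hypothetical name withRemoteFailures) guarantees the repository is un-poisoned for later operations even if an assertion in the middle fails:

    private static void withRemoteFailures(MockRepository repo, Runnable action) {
        repo.setRandomControlIOExceptionRate(1d); // fail all remote store interactions
        try {
            action.run();
        } finally {
            repo.setRandomControlIOExceptionRate(0d); // restore normal behavior
        }
    }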
protected static final int SHARD_COUNT = 1; - protected static final int REPLICA_COUNT = 1; + protected static int REPLICA_COUNT = 1; protected static final String TOTAL_OPERATIONS = "total-operations"; protected static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; protected static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; @@ -353,7 +353,13 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { // Validates that all the restricted settings are intact on all the nodes. repository.getRestrictedSystemRepositorySettings() .stream() - .forEach(setting -> assertEquals(setting.get(actualRepository.settings()), setting.get(expectedRepository.settings()))); + .forEach( + setting -> assertEquals( + String.format(Locale.ROOT, "Restricted Settings mismatch [%s]", setting.getKey()), + setting.get(actualRepository.settings()), + setting.get(expectedRepository.settings()) + ) + ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 5e92bb195680b..c61e2ec6e4f6c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -8,23 +8,41 @@ package org.opensearch.remotestore; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.admin.indices.datastream.DataStreamRolloverIT; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutionException; +import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; +import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; -import static
org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { @@ -48,68 +66,122 @@ private Map initialTestSetup(int shardCount, int replicaCount, int private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { internalCluster().stopAllNodes(); - addNewNodes(dataNodeCount, clusterManagerNodeCount); + internalCluster().startClusterManagerOnlyNodes(clusterManagerNodeCount); + internalCluster().startDataOnlyNodes(dataNodeCount); } - private void restoreAndValidate(String clusterUUID, Map indexStats) throws Exception { - restoreAndValidate(clusterUUID, indexStats, true); + protected void verifyRedIndicesAndTriggerRestore(Map indexStats, String indexName, boolean indexMoreDocs) + throws Exception { + ensureRed(indexName); + restore(false, indexName); + verifyRestoredData(indexStats, indexName, indexMoreDocs); } - private void restoreAndValidate(String clusterUUID, Map indexStats, boolean validate) throws Exception { - // TODO once auto restore is merged, the remote cluster state will be restored + public void testFullClusterRestore() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; - if (validate) { - // Step - 4 validation restore is successful. - ensureGreen(INDEX_NAME); - verifyRestoredData(indexStats, INDEX_NAME); - } - } + // Step - 1 index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); - private void restoreAndValidateFails( - String clusterUUID, - PlainActionFuture actionListener, - Class clazz, - String errorSubString - ) { + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); - try { - restoreAndValidate(clusterUUID, null, false); - } catch (Exception e) { - assertTrue( - String.format(Locale.ROOT, "%s %s", clazz, e), - clazz.isAssignableFrom(e.getClass()) - || clazz.isAssignableFrom(e.getCause().getClass()) - || (e.getCause().getCause() != null && clazz.isAssignableFrom(e.getCause().getCause().getClass())) - ); - assertTrue( - String.format(Locale.ROOT, "Error message mismatch. Expected: [%s]. 
Actual: [%s]", errorSubString, e.getMessage()), - e.getMessage().contains(errorSubString) - ); - } } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestore() throws Exception { + /** + * This test scenario covers the case where right after remote state restore and persisting it to disk via LucenePersistedState, full cluster restarts. + * This is a special case for remote state as at this point cluster uuid in the restored state is still ClusterState.UNKNOWN_UUID as we persist it disk. + * After restart the local disk state will be read but should be again overridden with remote state. + * + * 1. Form a cluster and index few docs + * 2. Replace all nodes to remove all local disk state + * 3. Start cluster manager node without correct seeding to ensure local disk state is written with cluster uuid ClusterState.UNKNOWN_UUID but with remote restored Metadata + * 4. Restart the cluster manager node with correct seeding. + * 5. After restart the cluster manager picks up the local disk state with has same Metadata as remote but cluster uuid is still ClusterState.UNKNOWN_UUID + * 6. The cluster manager will try to restore from remote again. + * 7. Metadata loaded from local disk state will be overridden with remote Metadata and no conflict should arise. + * 8. Add data nodes to recover index data + * 9. Verify Metadata and index data is restored. + */ + public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; - // Step - 1 index some data to generate files in remote directory + // index some data to generate files in remote directory Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); - // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata - resetCluster(dataNodeCount, clusterManagerNodeCount); + // stop all nodes + internalCluster().stopAllNodes(); + + // start a cluster manager node with no cluster manager seeding. + // This should fail with IllegalStateException as cluster manager fails to form without any initial seed + assertThrows( + IllegalStateException.class, + () -> internalCluster().startClusterManagerOnlyNodes( + clusterManagerNodeCount, + Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable seeding during bootstrapping + .build() + ) + ); + // verify cluster manager not elected String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) + : "Disabling Cluster manager seeding failed. cluster uuid is not unknown"; + + // restart cluster manager with correct seed + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) // Seed with correct Cluster Manager node + .build(); + } + }); + + // validate new cluster state formed + newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. 
cluster uuid is still unknown"; assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 3 Trigger full cluster restore and validate - restoreAndValidate(prevClusterUUID, indexStats); + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME)); + + // start data nodes to trigger index data recovery + internalCluster().startDataOnlyNodes(dataNodeCount); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") public void testFullClusterRestoreMultipleIndices() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -124,8 +196,10 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { Map indexStats2 = indexData(1, false, secondIndexName); assertEquals((shardCount + 1) * (replicaCount + 1), getNumShards(secondIndexName).totalNumShards); ensureGreen(secondIndexName); + updateIndexBlock(true, secondIndexName); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata resetCluster(dataNodeCount, clusterManagerNodeCount); @@ -133,99 +207,157 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 3 Trigger full cluster restore - restoreAndValidate(prevClusterUUID, indexStats); - ensureGreen(secondIndexName); - verifyRestoredData(indexStats2, secondIndexName); + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME, secondIndexName)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + verifyRedIndicesAndTriggerRestore(indexStats2, secondIndexName, false); + assertTrue(INDEX_READ_ONLY_SETTING.get(clusterService().state().metadata().index(secondIndexName).getSettings())); + assertThrows(ClusterBlockException.class, () -> indexSingleDoc(secondIndexName)); + // Test is complete + + // Remove the block to ensure proper cleanup + updateIndexBlock(false, secondIndexName); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestoreFailureValidationFailures() throws Exception { + public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; - // index some data to generate files in remote directory - Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + // Step - 1 index some data to generate files in remote directory + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - // Start of Test - 1 - // Test - 1 Trigger full cluster restore and validate it fails due to incorrect cluster UUID - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreAndValidateFails("randomUUID", future, IllegalStateException.class, "Remote Cluster State not found - randomUUID"); - // End of Test - 1 - - // Start of Test - 3 - // Test - 2 Trigger full cluster restore and validate it fails due to cluster UUID same as current cluster UUID - future = PlainActionFuture.newFuture(); - restoreAndValidateFails( - clusterService().state().metadata().clusterUUID(), - future, - IllegalArgumentException.class, - "clusterUUID to restore from should be different from current cluster UUID" - ); - // End of Test - 2 + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + String clusterName = clusterService().state().getClusterName().value(); - // Start of Test - 3 // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata - // Restarting cluster with just 1 data node helps with applying cluster settings - resetCluster(1, clusterManagerNodeCount); - String newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same"; - - reduceShardLimits(1, 1); - - // Step - 4 Trigger full cluster restore and validate it fails - future = PlainActionFuture.newFuture(); - restoreAndValidateFails( - prevClusterUUID, - future, - IllegalArgumentException.class, - "this action would add [2] total shards, but this cluster currently has [0]/[1] maximum shards open" - ); - resetShardLimits(); - // End of Test - 3 + internalCluster().stopAllNodes(); + // Step - 3 Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertThrows(IllegalStateException.class, () -> addNewNodes(dataNodeCount, clusterManagerNodeCount)); + // Test is complete - // Start of Test - 4 - // Test -4 Reset cluster and trigger full restore with same name index in the cluster - // Test -4 Add required nodes for this test after last reset. - addNewNodes(dataNodeCount - 1, 0); + // Starting a node without remote state to ensure test cleanup + internalCluster().startNode(Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build()); + } - newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + public void testRemoteStateFullRestart() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; - // Test -4 Step - 2 Create a new index with same name - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); - ensureYellowAndNoInitializingShards(INDEX_NAME); + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + // Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) + + "/cluster-state/" + + prevClusterUUID + + "/manifest" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + internalCluster().fullRestart(); ensureGreen(INDEX_NAME); + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. cluster uuid has changed"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateCurrentMetadata(); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } - future = PlainActionFuture.newFuture(); + private void validateMetadata(List indexNames) { + assertEquals(clusterService().state().metadata().indices().size(), indexNames.size()); + for (String indexName : indexNames) { + assertTrue(clusterService().state().metadata().hasIndex(indexName)); + } + } - // Test -4 Step - 3 Trigger full cluster restore and validate fails - restoreAndValidateFails( - prevClusterUUID, - future, - IllegalStateException.class, - "cannot restore index [remote-store-test-idx-1] because an open index with same name/uuid already exists in the cluster" + private void validateCurrentMetadata() throws Exception { + RemoteClusterStateService remoteClusterStateService = internalCluster().getInstance( + RemoteClusterStateService.class, + internalCluster().getClusterManagerName() ); + assertBusy(() -> { + ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().metadata().clusterUUID() + ).get(); + ClusterState clusterState = getClusterState(); + Metadata currentMetadata = clusterState.metadata(); + assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); + assertEquals(currentMetadata.coordinationMetadata().term(), manifest.getClusterTerm()); + assertEquals(clusterState.version(), manifest.getStateVersion()); + assertEquals(clusterState.stateUUID(), manifest.getStateUUID()); + assertEquals(currentMetadata.clusterUUIDCommitted(), manifest.isClusterUUIDCommitted()); + for (UploadedIndexMetadata uploadedIndexMetadata : manifest.getIndices()) { + IndexMetadata currentIndexMetadata = currentMetadata.index(uploadedIndexMetadata.getIndexName()); + assertEquals(currentIndexMetadata.getIndex().getUUID(), uploadedIndexMetadata.getIndexUUID()); + } + }); + } - // Test -4 Step - 4 validation restore is successful. 
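The validateCurrentMetadata helper above waits, via assertBusy, for the uploaded manifest to match the live cluster state. A minimal sketch of that polling pattern in plain Java (hypothetical names; assertBusy itself comes from the test framework, not this patch):

import java.util.function.BooleanSupplier;

final class AwaitSketch {
    // Poll until the condition holds or the deadline passes; fail like an assertion otherwise.
    static void await(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (condition.getAsBoolean() == false) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("condition not met within " + timeoutMillis + "ms");
            }
            Thread.sleep(100); // back off between checks
        }
    }
}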
- ensureGreen(INDEX_NAME); - // End of Test - 4 + public void testDataStreamPostRemoteStateRestore() throws Exception { + new DataStreamRolloverIT() { + protected boolean triggerRemoteStateRestore() { + return true; + } + }.testDataStreamRollover(); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { + public void testFullClusterRestoreGlobalMetadata() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; // Step - 1 index some data to generate files in remote directory - initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Create global metadata - register a custom repo + Path repoPath = registerCustomRepository(); + + // Create global metadata - persistent settings + updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build()); + + // Create global metadata - index template + putIndexTemplate(); + + // Create global metadata - Put cluster block + addClusterLevelReadOnlyBlock(); // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata resetCluster(dataNodeCount, clusterManagerNodeCount); @@ -233,56 +365,102 @@ public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathTh String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 4 Delete index metadata file in remote - try { - Files.move( - segmentRepoPath.resolve( - RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) - + "/cluster-state/" - + prevClusterUUID - + "/index" - ), - segmentRepoPath.resolve("cluster-state/") - ); - } catch (IOException e) { - throw new RuntimeException(e); - } + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + + validateCurrentMetadata(); + assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings())); + assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings())); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + // Remove the cluster read-only block to ensure proper cleanup + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertFalse(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); - // Step - 5 Trigger full cluster restore and validate fails - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreAndValidateFails(prevClusterUUID, future, IllegalStateException.class, "asdsa"); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + + // validate global metadata restored + verifyRestoredRepositories(repoPath); + verifyRestoredIndexTemplate(); } - private void reduceShardLimits(int maxShardsPerNode, int maxShardsPerCluster) { - // Step 3 - Reduce shard limits to hit shard limit with less no of shards - try { + private Path registerCustomRepository() { + Path path = randomRepoPath(); + assertAcked( client().admin() .cluster() - .updateSettings( - new ClusterUpdateSettingsRequest().transientSettings( - Settings.builder() - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) - .put(SETTING_MAX_SHARDS_PER_CLUSTER_KEY, maxShardsPerCluster) - ) - ) - .get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } + .preparePutRepository("custom-repo") + .setType("fs") + .setSettings(Settings.builder().put("location", path).put("compress", false)) + .get() + ); + return path; } - private void resetShardLimits() { - // Step - 5 Reset the cluster settings + private void verifyRestoredRepositories(Path repoPath) { + RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE); + assertEquals(3, repositoriesMetadata.repositories().size()); // includes remote store repo as well + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); + assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + assertEquals( + Settings.builder().put("location", repoPath).put("compress", false).build(), + repositoriesMetadata.repository("custom-repo").settings() + ); + + // repo cleanup post verification + clusterAdmin().prepareDeleteRepository("custom-repo").get(); + } + + private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException { + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), true).build()); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + } + + private void updatePersistentSettings(Settings settings) throws ExecutionException, InterruptedException { ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); - resetRequest.transientSettings( - Settings.builder().putNull(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()).putNull(SETTING_MAX_SHARDS_PER_CLUSTER_KEY) + resetRequest.persistentSettings(settings); +
assertAcked(client().admin().cluster().updateSettings(resetRequest).get()); + } + + private void verifyRestoredIndexTemplate() { + Map indexTemplateMetadataMap = clusterService().state().metadata().templates(); + assertEquals(1, indexTemplateMetadataMap.size()); + assertEquals(Arrays.asList("pattern-1", "log-*"), indexTemplateMetadataMap.get("my-template").patterns()); + assertEquals( + Settings.builder() // <1> + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + .build(), + indexTemplateMetadataMap.get("my-template").settings() ); + } - try { - client().admin().cluster().updateSettings(resetRequest).get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } + private static void putIndexTemplate() { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); // <1> + request.patterns(Arrays.asList("pattern-1", "log-*")); // <2> + + request.settings( + Settings.builder() // <1> + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + ); + assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged()); } + private static void updateIndexBlock(boolean value, String secondIndexName) throws InterruptedException, ExecutionException { + assertAcked( + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(Settings.builder().put(INDEX_READ_ONLY_SETTING.getKey(), value).build(), secondIndexName) + ) + .get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 1fb5c2052aded..b3b4f8e10fd31 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -509,4 +509,27 @@ public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Excepti assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); }); } + + public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + IndexShard indexShard = getIndexShard(primaryShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + + String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen(INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + indexShard = getIndexShard(replicaShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index 002a149f0c286..ef2dcf3217df6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -8,21 +8,31 @@ package org.opensearch.remotestore; +import 
org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase { @@ -162,4 +172,13 @@ public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { ensureStableCluster(4); } + + public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException { + GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON)); + XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder)); + assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings())); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 8ae25c6758195..2d3ab135d0377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -15,6 +15,8 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -23,15 +25,20 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Before; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; import 
java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -44,12 +51,17 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; - @Before + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + public void setup() { internalCluster().startNodes(3); } public void testStatsResponseFromAllNodes() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -118,6 +130,7 @@ public void testStatsResponseFromAllNodes() { } public void testStatsResponseAllShards() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -175,6 +188,7 @@ public void testStatsResponseAllShards() { } public void testStatsResponseFromLocalNode() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -236,6 +250,7 @@ public void testStatsResponseFromLocalNode() { } public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Disable Refresh Interval for the index @@ -325,6 +340,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { + setup(); // Scenario: // - Create index with single primary and N-1 replica shards (N = no of data nodes) // - Disable Refresh Interval for the index @@ -416,6 +432,7 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr } public void testStatsOnShardRelocation() { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Index documents @@ -471,6 +488,7 @@ public void testStatsOnShardRelocation() { } public void testStatsOnShardUnassigned() throws IOException { + setup(); // Scenario: // - Create index with single primary and two replica shard // - Index documents @@ -497,6 +515,7 @@ public void testStatsOnShardUnassigned() throws IOException { } public void testStatsOnRemoteStoreRestore() throws IOException { + setup(); // Creating an index with primary shard count == total nodes in cluster and 0 replicas int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount)); @@ -544,6 +563,7 @@ public void testStatsOnRemoteStoreRestore() throws IOException { } public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exception { + setup(); // Create an index with one primary and one replica shard createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); ensureGreen(INDEX_NAME); @@ -561,26 +581,103 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce .getRemoteStoreStats(); 
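A note on the fixture change running through RemoteStoreStatsIT (not part of this patch): the @Before setup became a plain setup() method invoked explicitly by the tests that want the default three-node cluster, freeing other tests to bootstrap nodes with custom settings first. Schematically, with hypothetical names:

public class ExplicitSetupSketch {
    void setup() {
        // start the default three-node cluster (shared fixture)
    }

    public void testWithDefaultCluster() {
        setup(); // opts in to the shared fixture explicitly
        // assertions against the default cluster ...
    }

    public void testWithCustomCluster() {
        // no setup(): bootstrap nodes with bespoke settings (e.g. fast leader/follower checks) instead
        // assertions against the custom cluster ...
    }
}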
Arrays.stream(remoteStoreStats).forEach(statObject -> { RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); if (statObject.getShardRouting().primary()) { assertTrue( segmentStats.totalUploadsSucceeded == 1 && segmentStats.totalUploadsStarted == segmentStats.totalUploadsSucceeded && segmentStats.totalUploadsFailed == 0 ); + // On primary shard creation, we upload to remote translog post primary mode activation. + // This changes upload stats to non-zero for primary shard. + assertNonZeroTranslogUploadStatsNoFailures(translogStats); } else { assertTrue( segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 ); + assertZeroTranslogUploadStats(translogStats); } - - RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); - assertZeroTranslogUploadStats(translogStats); assertZeroTranslogDownloadStats(translogStats); }); }, 5, TimeUnit.SECONDS); } + public void testStatsCorrectnessOnFailover() { + Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(nodeSettings(0)) + .build(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(clusterSettings); + internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create an index with one primary and one replica shard + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + + // Index some docs and refresh + indexDocs(); + refresh(INDEX_NAME); + + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + + // Start network disruption - primary node will be isolated + Set nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + ensureStableCluster(2, clusterManagerNode); + + RemoteStoreStatsResponse response = client(clusterManagerNode).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, "0"); + List matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(1, matches.size()); + RemoteSegmentTransferTracker.Stats segmentStats = matches.get(0).getSegmentStats(); + assertEquals(0, segmentStats.refreshTimeLagMs); + + networkDisruption.stopDisrupting(); + internalCluster().clearDisruptionScheme(); + ensureStableCluster(3, clusterManagerNode); + ensureGreen(INDEX_NAME); + logger.info("Test completed"); + } + + public 
void testZeroLagOnCreateIndex() throws InterruptedException { + setup(); + String clusterManagerNode = internalCluster().getClusterManagerName(); + + int numOfShards = randomIntBetween(1, 3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, numOfShards)); + ensureGreen(INDEX_NAME); + long currentTimeNs = System.nanoTime(); + while (currentTimeNs == System.nanoTime()) { + Thread.sleep(10); + } + + for (int i = 0; i < numOfShards; i++) { + RemoteStoreStatsResponse response = client(clusterManagerNode).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, String.valueOf(i)) + .get(); + for (RemoteStoreStats remoteStoreStats : response.getRemoteStoreStats()) { + assertEquals(0, remoteStoreStats.getSegmentStats().refreshTimeLagMs); + } + } + } + private void indexDocs() { for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java new file mode 100644 index 0000000000000..b7b3f1d14f422 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Optional; +import java.util.Set; + +/** + * This class runs tests with remote store + segRep while blocking file downloads + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationUsingRemoteStoreDisruptionIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(1); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCancelReplicationWhileSyncingSegments() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnySegmentFile(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, 
TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_FILES, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + public void testCancelReplicationWhileFetchingMetadata() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnyFiles(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + private String getNode(Set dataNodeNames, boolean primary) { + assertEquals(2, dataNodeNames.size()); + for (String name : dataNodeNames) { + final IndexShard indexShard = getIndexShard(name, INDEX_NAME); + if (indexShard.routingEntry().primary() == primary) { + return name; + } + } + return null; + } + + private IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = 
indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index 94816346e6c9e..52cc797ddd8da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -93,6 +93,7 @@ public void testSimpleTimeout() throws Exception { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setTimeout(new TimeValue(5, TimeUnit.MILLISECONDS)) @@ -104,12 +105,11 @@ public void testSimpleTimeout() throws Exception { } public void testSimpleDoesNotTimeout() throws Exception { - final int numDocs = 10; + final int numDocs = 9; for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); } - refresh("test"); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setTimeout(new TimeValue(10000, TimeUnit.SECONDS)) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) @@ -122,7 +122,7 @@ public void testSimpleDoesNotTimeout() throws Exception { public void testPartialResultsIntolerantTimeout() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - + indexRandomForConcurrentSearch("test"); OpenSearchException ex = expectThrows( OpenSearchException.class, () -> client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index 5207dab83f1d9..aa1fe695ecc12 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -501,8 +501,9 @@ public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Excep logger.info("--> creating network partition disruption"); final String clusterManagerNode1 = internalCluster().getClusterManagerName(); - Set nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0)).collect(Collectors.toCollection(HashSet::new)); - Set nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOneSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0), nodeMap.get("c").get(0)) + .collect(Collectors.toCollection(HashSet::new)); NetworkDisruption networkDisruption = new NetworkDisruption( new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), @@ -870,8 +871,7 @@ private void assertSearchInAZ(String az) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); if (stat.getNode().isDataNode()) { if (stat.getNode().getId().equals(dataNodeId)) { - Assert.assertTrue(searchStats.getFetchCount() > 0L); - 
Assert.assertTrue(searchStats.getQueryCount() > 0L); + Assert.assertTrue(searchStats.getFetchCount() > 0L || searchStats.getQueryCount() > 0L); } } } @@ -945,7 +945,6 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws } logger.info("--> network disruption is stopped"); - networkDisruption.stopDisrupting(); for (int i = 0; i < 50; i++) { try { @@ -962,6 +961,8 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws fail("search should not fail"); } } + networkDisruption.stopDisrupting(); + assertSearchInAZ("b"); assertSearchInAZ("c"); assertNoSearchInAZ("a"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 865dd670fbf68..1d5f7f93e7410 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -33,9 +33,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; @@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -221,10 +222,6 @@ public void testNestedDiversity() throws Exception { } public void testNestedSamples() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); // Test samples nested under samples int MAX_DOCS_PER_AUTHOR = 1; int MAX_DOCS_PER_GENRE = 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java index 1ef2c0e8db8c7..6289cd5e36151 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java @@ -172,6 +172,7 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("idx").setSource(source.endObject()).get(); } refresh(); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index b3009ffcf4f45..7af2ac218800d 
100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -224,6 +224,7 @@ public void setupSuiteScopeCluster() throws Exception { ) ); indexRandom(true, builders); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } @@ -354,6 +355,7 @@ public void testNestedAsSubAggregation() throws Exception { } public void testNestNestedAggs() throws Exception { + indexRandomForConcurrentSearch("idx_nested_nested_aggs"); SearchResponse response = client().prepareSearch("idx_nested_nested_aggs") .addAggregation( nested("level1", "nested1").subAggregation( @@ -607,6 +609,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("idx4"); SearchResponse response = client().prepareSearch("idx4") .addAggregation( @@ -782,6 +785,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("classes"); SearchResponse response = client().prepareSearch("classes") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index 64ab6f1382ac3..5812b7796c33e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -184,6 +184,7 @@ public void setupSuiteScopeCluster() throws Exception { builders.add(client().prepareIndex("new_index").setSource(Collections.emptyMap())); indexRandom(true, builders); + indexRandomForMultipleSlices("idx", "old_index", "new_index"); ensureSearchable(); } @@ -917,6 +918,7 @@ public void testOverlappingRanges() throws Exception { } public void testEmptyAggregation() throws Exception { + indexRandomForConcurrentSearch("empty_bucket_idx"); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index 7033c42c5d661..c7b03d21cb6bb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -34,9 +34,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; @@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + 
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -195,6 +196,23 @@ public void testSimpleSampler() throws Exception { assertThat(maxBooksPerAuthor, equalTo(3L)); } + public void testSimpleSamplerShardSize() throws Exception { + final int SHARD_SIZE = randomIntBetween(1, 3); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(SHARD_SIZE); + sampleAgg.subAggregation(terms("authors").field("author")); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); + assertSearchResponse(response); + Sampler sample = response.getAggregations().get("sample"); + Terms authors = sample.getAggregations().get("authors"); + assertEquals(SHARD_SIZE * NUM_SHARDS, sample.getDocCount()); + } + public void testUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 3fcf4b5d533d4..b355ce6d7a8dd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -225,6 +225,7 @@ public void setupSuiteScopeCluster() throws Exception { } indexRandom(true, builders); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 2bf5230c67b43..6cbf278317b1b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -76,6 +76,7 @@ protected Settings featureFlagSettings() { /** * Test that searches using cardinality aggregations returns all request breaker memory. 
*/ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10154") public void testRequestBreaker() throws Exception { final String requestBreaker = randomIntBetween(1, 10000) + "kb"; logger.info("--> Using request breaker setting: {}", requestBreaker); @@ -99,6 +100,7 @@ public void testRequestBreaker() throws Exception { ) .get(); + indexRandomForConcurrentSearch("test"); try { client().prepareSearch("test") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index 18484c8a60ed7..41bbffc13658b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -693,6 +693,7 @@ public void testAvgMovavgDerivNPE() throws Exception { } refresh(); + indexRandomForConcurrentSearch("movavg_npe"); SearchResponse response = client().prepareSearch("movavg_npe") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 2aad0d2d38901..8ad3107ac33ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -1320,6 +1320,7 @@ public void testPredictWithNonEmptyBuckets() throws Exception { .setSource(jsonBuilder().startObject().field(INTERVAL_FIELD, i).field(VALUE_FIELD2, 10).endObject()) ); } + indexRandomForConcurrentSearch("predict_non_empty"); bulkBuilder.get(); ensureSearchable(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java index 44c4981dfdb36..bd623ccdf2731 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java @@ -153,6 +153,7 @@ private void buildRedIndex(int numShards) throws Exception { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java index 71af7215c4eb7..a5989b693d332 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java @@ -106,6 +106,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) } client().prepareIndex("test").setId(id).setSource("field", "test").get(); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index ce5f3f63faa66..edceb0cbc0d24 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -128,6 +128,7 @@ private Set prepareData(int numShards) throws Exception { fullExpectedIds.add(Integer.toString(i)); } refresh(); + indexRandomForConcurrentSearch("test"); return fullExpectedIds; } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 86df25c4dad65..87f2153eb800f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -119,6 +119,7 @@ public void testPlugin() throws Exception { .actionGet(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setSource(new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java index 9b3e1337418cc..1a730c01e4890 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java @@ -897,6 +897,7 @@ public void testNestedSource() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("index1"); // the field name (comments.message) used for source filtering should be the same as when using that field for // other features (like in the query dsl or aggs) in order for consistency: @@ -973,6 +974,7 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception { client().prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get(); client().prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); + indexRandomForConcurrentSearch("index1", "index2"); SearchResponse response = client().prepareSearch("index1", "index2") .setQuery( @@ -1002,6 +1004,7 @@ public void testUseMaxDocInsteadOfSize() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); QueryBuilder query = nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1) ); @@ -1019,6 +1022,7 @@ public void testTooHighResultWindow() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); SearchResponse response = client().prepareSearch("index2") .setQuery( nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index 23b5d0cab0697..83cedb8c20e1d 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -91,6 +91,7 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -141,6 +142,7 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test").get(); client().prepareIndex("test").setId("3").setSource("name", "test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -192,6 +194,7 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex client().prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get(); client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))) @@ -224,12 +227,13 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex } } - public void testRegExpQuerySupportsName() { + public void testRegExpQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")) @@ -246,12 +250,13 @@ public void testRegExpQuerySupportsName() { } } - public void testPrefixQuerySupportsName() { + public void testPrefixQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")) @@ -268,12 +273,13 @@ public void testPrefixQuerySupportsName() { } } - public void testFuzzyQuerySupportsName() { + public void testFuzzyQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")) @@ -290,12 +296,13 @@ public void testFuzzyQuerySupportsName() { } } - public void testWildcardQuerySupportsName() { + public void testWildcardQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")) @@ -312,12 +319,13 @@ public void testWildcardQuerySupportsName() 
{ } } - public void testSpanFirstQuerySupportsName() { + public void testSpanFirstQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")) @@ -344,6 +352,7 @@ public void testMatchedWithShould() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); client().prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Execute search at least two times to load it in cache int iter = scaledRandomIntBetween(2, 10); @@ -378,6 +387,7 @@ public void testMatchedWithWrapperQuery() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); refresh(); + indexRandomForConcurrentSearch("test"); MatchQueryBuilder matchQueryBuilder = matchQuery("content", "amet").queryName("abc"); BytesReference matchBytes = XContentHelper.toXContent(matchQueryBuilder, MediaTypeRegistry.JSON, false); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 42d91ac945662..2afa911223074 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -155,7 +155,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); } - public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException { + public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties") @@ -177,6 +177,7 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { SearchResponse search = client().prepareSearch() @@ -190,12 +191,13 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio } } - public void testHighlightingWithStoredKeyword() throws IOException { + public void testHighlightingWithStoredKeyword() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties").startObject("text").field("type", "keyword").field("store", true).endObject().endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); + indexRandomForConcurrentSearch("test"); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); refresh(); SearchResponse search = client().prepareSearch() @@ -205,7 +207,7 @@ public void testHighlightingWithStoredKeyword() throws IOException { 
assertHighlight(search, 0, "text", 0, equalTo("foo")); } - public void testHighlightingWithWildcardName() throws IOException { + public void testHighlightingWithWildcardName() throws IOException, InterruptedException { // test the kibana case with * as fieldname that will try highlight all fields including meta fields XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -221,6 +223,7 @@ public void testHighlightingWithWildcardName() throws IOException { assertAcked(prepareCreate("test").setMapping(mappings)); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -230,7 +233,7 @@ public void testHighlightingWithWildcardName() throws IOException { } } - public void testFieldAlias() throws IOException { + public void testFieldAlias() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -248,7 +251,7 @@ public void testFieldAlias() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -257,7 +260,7 @@ public void testFieldAlias() throws IOException { } } - public void testFieldAliasWithSourceLookup() throws IOException { + public void testFieldAliasWithSourceLookup() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -276,7 +279,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo bar").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -285,7 +288,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { } } - public void testFieldAliasWithWildcardField() throws IOException { + public void testFieldAliasWithWildcardField() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("keyword") @@ -301,13 +304,14 @@ public void testFieldAliasWithWildcardField() throws IOException { client().prepareIndex("test").setId("1").setSource("keyword", "foo").get(); refresh(); + indexRandomForConcurrentSearch("test"); HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false); SearchResponse search = client().prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); assertHighlight(search, 0, "alias", 0, equalTo("foo")); } - public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException { + public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("_source") @@ -334,6 +338,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc 
.setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -350,7 +355,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc } // see #3486 - public void testHighTermFrequencyDoc() throws IOException { + public void testHighTermFrequencyDoc() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping("name", "type=text,term_vector=with_positions_offsets,store=" + randomBoolean())); StringBuilder builder = new StringBuilder(); for (int i = 0; i < 6000; i++) { @@ -358,6 +363,7 @@ public void testHighTermFrequencyDoc() throws IOException { } client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("name", "abc"))) .highlighter(new HighlightBuilder().field("name")) @@ -385,6 +391,7 @@ public void testEnsureNoNegativeOffsets() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed")) .highlighter(new HighlightBuilder().field("long_term", 18, 1).highlighterType("fvh")) @@ -671,7 +678,7 @@ public void testHighlightIssue1994() throws Exception { assertHighlight(search, 0, "titleTV", 1, 2, equalTo("highlight other text")); } - public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { + public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() throws InterruptedException { createIndex("test"); ensureGreen(); @@ -684,6 +691,7 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 and field2 produces different tags"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -734,6 +742,7 @@ public void testHighlightingOnWildcardFields() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field*"); SearchSourceBuilder source = searchSource() @@ -783,6 +792,7 @@ public void testForceSourceWithSourceDisabled() throws Exception { .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content") .get(); refresh(); + indexRandomForConcurrentSearch("test"); // works using stored field SearchResponse searchResponse = client().prepareSearch("test") @@ -823,6 +833,7 @@ public void testPlainHighlighter() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -1025,6 +1036,7 @@ public void testFVHManyMatches() throws Exception { String value = new String(new char[1024 * 256 / pattern.length()]).replace("\0", pattern); client().prepareIndex("test").setSource("field1", value).get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 with default phrase 
limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) @@ -1116,6 +1128,7 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception ); index("test", "type1", "3", "foo", "weird", "bar", "result"); refresh(); + indexRandomForConcurrentSearch("test"); Field fooField = new Field("foo").numOfFragments(1) .order("score") @@ -1408,6 +1421,7 @@ public void testMultiMapperVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1453,6 +1467,7 @@ public void testMultiMapperVectorFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1498,6 +1513,7 @@ public void testMultiMapperNoVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1542,6 +1558,7 @@ public void testMultiMapperNoVectorFromSource() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1571,6 +1588,7 @@ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio .setSource("title", "This is a test for the enabling fast vector highlighter"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "this is a test")) @@ -1608,6 +1626,7 @@ public void testDisableFastVectorHighlighter() throws Exception { .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "test for the workaround")) @@ -1669,6 +1688,7 @@ public void testFSHHighlightAllMvFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "tag")) @@ -1686,11 +1706,12 @@ public void testFSHHighlightAllMvFragments() throws Exception { ); } - public void testBoostingQuery() { + public void testBoostingQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1702,11 +1723,12 @@ public void testBoostingQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void 
testBoostingQueryTermVector() throws IOException { + public void testBoostingQueryTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1718,12 +1740,13 @@ public void testBoostingQueryTermVector() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsQuery() { + public void testCommonTermsQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -1733,12 +1756,13 @@ public void testCommonTermsQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsTermVector() throws IOException { + public void testCommonTermsTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); @@ -1764,6 +1788,7 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) @@ -1816,12 +1841,13 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ); } - public void testPlainHighlighterMultipleFields() { + public void testPlainHighlighterMultipleFields() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1834,7 +1860,7 @@ public void testPlainHighlighterMultipleFields() { assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); } - public void testFastVectorHighlighterMultipleFields() { + public void testFastVectorHighlighterMultipleFields() throws InterruptedException { assertAcked( prepareCreate("test").setMapping( "field1", @@ -1847,6 +1873,7 @@ public void testFastVectorHighlighterMultipleFields() { index("test", "type1", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) 
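The hunks in HighlighterSearchIT.java above and below all follow the same recipe: index the fixture documents, refresh(), then call indexRandomForConcurrentSearch("test") before the first search, so that the concurrent segment search path has more than one slice to execute; with a single tiny segment the concurrent code path degenerates to the sequential one and these tests would not exercise it. A minimal sketch of that idea, assuming a hypothetical helper name and dummy-document scheme (the real helper lives in the OpenSearch test framework and its details may differ):

    import org.opensearch.action.support.WriteRequest;
    import org.opensearch.client.Client;

    // Hypothetical sketch: force an index into several small segments so a
    // concurrent segment search has multiple slices to schedule.
    static void forceMultipleSegments(Client client, String index, int segments) {
        for (int i = 0; i < segments; i++) {
            // Each IMMEDIATE refresh flushes the indexing buffer, cutting a
            // new segment that holds just this dummy document.
            client.prepareIndex(index)
                .setId("dummy-" + i)
                .setSource("dummy_marker", i)
                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                .get();
        }
        for (int i = 0; i < segments; i++) {
            // Delete the dummies again so the hit-count and doc-count
            // assertions in the tests are unaffected.
            client.prepareDelete(index, "dummy-" + i)
                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                .get();
        }
    }

Deleting the dummy documents keeps them out of query results, while the extra segments persist until the next merge, which is long enough for a single test method.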
@@ -1864,6 +1891,7 @@ public void testMissingStoredField() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); // This query used to fail when the field to highlight was absent SearchResponse response = client().prepareSearch("test") @@ -1904,6 +1932,7 @@ public void testNumericHighlighting() throws Exception { .setSource("text", "opensearch test", "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1926,6 +1955,7 @@ public void testResetTwice() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("text", "opensearch test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1935,7 +1965,7 @@ public void testResetTwice() throws Exception { assertHitCount(response, 1L); } - public void testHighlightUsesHighlightQuery() throws IOException { + public void testHighlightUsesHighlightQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -1946,6 +1976,7 @@ public void testHighlightUsesHighlightQuery() throws IOException { index("test", "type1", "1", "text", "Testing the highlight query feature"); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder.Field field = new HighlightBuilder.Field("text"); @@ -1981,7 +2012,7 @@ private static String randomStoreField() { return ""; } - public void testHighlightNoMatchSize() throws IOException { + public void testHighlightNoMatchSize() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -1993,6 +2024,7 @@ public void testHighlightNoMatchSize() throws IOException { String text = "I am pretty long so some of me should get cut off. Second sentence"; index("test", "type1", "1", "text", text); refresh(); + indexRandomForConcurrentSearch("test"); // When you don't set noMatchSize you don't get any results if there isn't anything to highlight. 
HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21).numOfFragments(1).highlighterType("plain"); @@ -2091,7 +2123,7 @@ public void testHighlightNoMatchSize() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { + public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -2104,6 +2136,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { String text2 = "I am short"; index("test", "type1", "1", "text", new String[] { text1, text2 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21) @@ -2186,7 +2219,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { + public void testHighlightNoMatchSizeNumberOfFragments() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -2200,6 +2233,7 @@ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { String text3 = "This is the fifth sentence"; index("test", "type1", "1", "text", new String[] { text1, text2, text3 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(1) @@ -2243,6 +2277,7 @@ public void testPostingsHighlighter() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -2320,6 +2355,7 @@ public void testPostingsHighlighterMultipleFields() throws Exception { "The slow brown fox. Second sentence." 
); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -2344,6 +2380,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) @@ -2376,6 +2413,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); @@ -2412,7 +2450,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { } } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws IOException, InterruptedException { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("properties") @@ -2434,6 +2472,7 @@ public void testMultiMatchQueryHighlight() throws IOException { .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") .get(); refresh(); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { String highlighterType = rarely() ? null : RandomPicks.randomFrom(random(), ALL_TYPES); @@ -2479,6 +2518,7 @@ public void testPostingsHighlighterOrderByScore() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) @@ -2565,6 +2605,7 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2623,6 +2664,7 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2672,13 +2714,14 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { assertNoFailures(search); } - public void testPostingsHighlighterBoostingQuery() throws IOException { + public void testPostingsHighlighterBoostingQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); client().prepareIndex("test") .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2689,7 +2732,7 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog! 
Second sentence.")); } - public void testPostingsHighlighterCommonTermsQuery() throws IOException { + public void testPostingsHighlighterCommonTermsQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); @@ -2697,6 +2740,7 @@ public void testPostingsHighlighterCommonTermsQuery() throws IOException { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -2738,6 +2782,7 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui")).highlighter(highlight().field("field2")); @@ -2760,6 +2805,7 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck")).highlighter(highlight().field("field2")); @@ -2783,6 +2829,7 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k")).highlighter(highlight().field("field2")); @@ -2806,6 +2853,7 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*")).highlighter(highlight().field("field2")); @@ -2840,6 +2888,7 @@ public void testPostingsHighlighterTermRangeQuery() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz")) @@ -2857,6 +2906,7 @@ public void testPostingsHighlighterQueryString() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2")) @@ -2878,6 +2928,7 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+"))) @@ -2892,6 +2943,7 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2909,6 +2961,7 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2924,6 +2977,7 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -3028,7 +3082,7 @@ public void testFastVectorHighlighterPhraseBoost() throws Exception { * because it doesn't support the concept of terms having a different weight based on position. * @param highlighterType highlighter to test */ - private void phraseBoostTestCase(String highlighterType) { + private void phraseBoostTestCase(String highlighterType) throws InterruptedException { ensureGreen(); StringBuilder text = new StringBuilder(); text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n"); @@ -3041,6 +3095,7 @@ private void phraseBoostTestCase(String highlighterType) { } index("test", "type1", "1", "field1", text.toString()); refresh(); + indexRandomForConcurrentSearch("test"); // Match queries phraseBoostTestCaseForClauses( @@ -3109,7 +3164,7 @@ private
<P extends AbstractQueryBuilder<P>
> void phraseBoostTestCaseForClauses( assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher); } - public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException { + public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException, InterruptedException { // check that we do not get an exception for geo_point fields in case someone tries to highlight // it accidentially with a wildcard // see https://github.com/elastic/elasticsearch/issues/17537 @@ -3133,6 +3188,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); String highlighterType = randomFrom(ALL_TYPES); QueryBuilder query = QueryBuilders.boolQuery() .should( @@ -3150,7 +3206,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); } - public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { + public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException, InterruptedException { // same as above but in this example the query gets rewritten during highlighting // see https://github.com/elastic/elasticsearch/issues/17537#issuecomment-244939633 XContentBuilder mappings = jsonBuilder(); @@ -3177,6 +3233,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); QueryBuilder query = QueryBuilders.functionScoreQuery( QueryBuilders.boolQuery() @@ -3192,7 +3249,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException assertThat(search.getHits().getTotalHits().value, equalTo(1L)); } - public void testKeywordFieldHighlighting() throws IOException { + public void testKeywordFieldHighlighting() throws IOException, InterruptedException { // check that keyword highlighting works XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -3205,6 +3262,7 @@ public void testKeywordFieldHighlighting() throws IOException { .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setSource( new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) @@ -3238,6 +3296,7 @@ public void testCopyToFields() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchQuery("foo_copy", "brown")) .highlighter(new HighlightBuilder().field(new Field("foo_copy"))) @@ -3287,7 +3346,7 @@ public void testACopyFieldWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) @@ -3305,6 +3364,7 @@ public void testFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) 
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) @@ -3322,6 +3382,7 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").field("enable", "yes").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder.FilterFunctionBuilder filterBuilder = new FunctionScoreQueryBuilder.FilterFunctionBuilder( QueryBuilders.termQuery("enable", "yes"), new RandomScoreFunctionBuilder() @@ -3343,10 +3404,6 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { } public void testHighlightQueryRewriteDatesWithNow() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10434", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( client().admin() .indices() @@ -3420,6 +3477,7 @@ public void testWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String type : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3477,6 +3535,7 @@ public void testWithNormalizer() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3499,6 +3558,7 @@ public void testDisableHighlightIdField() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "plain", "unified" }) { SearchResponse searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index 6b95405b3ebd4..f5d1b8234558e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -244,10 +244,6 @@ public void testWithIndexAlias() { } public void testWithIndexFilter() throws InterruptedException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10433", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword")); assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 799bbf91a567d..ed8fe74504f92 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -251,6 +251,7 @@ public void testStoredFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -358,6 +359,7 @@ public void testScriptDocAndFields() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value"); SearchResponse response = client().prepareSearch() @@ -458,6 +460,7 @@ public void testScriptWithUnsignedLong() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -547,6 +550,7 @@ public void testScriptFieldWithNanos() throws Exception { .setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -632,6 +636,7 @@ public void testScriptFieldUsingSource() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -674,6 +679,7 @@ public void testScriptFieldUsingSource() throws Exception { public void testScriptFieldsForNullReturn() throws Exception { client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) .addScriptField("test_script_1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) @@ -795,6 +801,7 @@ public void testStoredFieldsWithoutSource() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -852,6 +859,7 @@ public void testSearchFieldsMetadata() throws Exception { .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("my-index"); SearchResponse searchResponse = client().prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); @@ -866,6 +874,7 @@ public void testSearchFieldsNonLeafField() throws Exception { .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("my-index"); assertFailures( client().prepareSearch("my-index").addStoredField("field1"), @@ -932,6 +941,7 @@ public void testGetFieldsComplexField() throws Exception { ); client().prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, MediaTypeRegistry.JSON).get(); + indexRandomForConcurrentSearch("my-index"); String field = "field1.field2.field3.field4"; @@ -1039,6 +1049,7 @@ public void testDocValueFields() throws Exception { .get(); 
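A second recurring change, visible in the SamplerIT.java and DiversifiedSamplerIT.java hunks further up, swaps one RefreshRequest issued after the indexing loop for a per-request refresh policy. Both make the documents searchable, but they differ in segment layout, which is what the concurrent-search slicing cares about. A short illustration with placeholder index and field names:

    import org.opensearch.action.support.WriteRequest;

    // Per-request refresh: the document is searchable once get() returns,
    // and each refresh tends to cut its own small segment.
    client().prepareIndex("test")
        .setId("1")
        .setSource("field", "value")
        .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
        .get();

    // Batch refresh: documents become searchable together, typically in
    // fewer, larger segments.
    client().prepareIndex("test").setId("2").setSource("field", "value").get();
    client().admin().indices().prepareRefresh("test").get();

Refreshing per request inside a setup loop therefore doubles as a way to leave the suite-scope indices with enough segments for multiple search slices.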
client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1271,6 +1282,7 @@ public void testScriptFields() throws Exception { ); } indexRandom(true, reqs); + indexRandomForConcurrentSearch("index"); ensureSearchable(); SearchRequestBuilder req = client().prepareSearch("index"); for (String field : Arrays.asList("s", "ms", "l", "ml", "d", "md")) { @@ -1326,6 +1338,7 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1387,6 +1400,7 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1440,6 +1454,7 @@ public void testStoredFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "field1", "value1", "field2", "value2"); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1482,6 +1497,7 @@ public void testWildcardStoredFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "field1", "value1", "field2", "value2"); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); assertHitCount(searchResponse, 1L); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index 6eb528e0bb7d3..3a6624c2ad2e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -398,6 +398,7 @@ public void testBoostModeSettingWorks() throws Exception { ) ); indexRandom(true, false, indexBuilders); // force no dummy docs + indexRandomForConcurrentSearch("test"); // Test Gauss List lonlat = new ArrayList<>(); @@ -482,6 +483,7 @@ public void testParseGeoPoint() throws Exception { constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); + indexRandomForConcurrentSearch("test"); GeoPoint point = new GeoPoint(20, 11); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -535,6 +537,7 @@ public void testCombineModes() throws Exception { .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder baseQuery = functionScoreQuery( constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(2) @@ -654,6 +657,7 @@ public void testCombineModesExplain() throws Exception { constantScoreQuery(termQuery("test", "value")).queryName("query1"), ScoreFunctionBuilders.weightFactorFunction(2, 
"weight1") ); + indexRandomForConcurrentSearch("test"); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -762,6 +766,7 @@ public void testParseDateMath() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse sr = client().search( searchRequest().source( searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) @@ -817,6 +822,7 @@ public void testValueMissingLin() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -893,6 +899,7 @@ public void testDateWithoutOrigin() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -974,6 +981,7 @@ public void testManyDocsLin() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( @@ -1107,6 +1115,7 @@ public void testNoQueryGiven() throws Exception { client().index(indexRequest("test").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject())) .actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); // so, we indexed a string field, but now we try to score a num field ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -1171,6 +1180,7 @@ public void testMultiFieldOptions() throws Exception { ); indexRandom(true, doc1, doc2); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search(searchRequest().source(searchSource().query(baseQuery))); SearchResponse sr = response.actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java index b09914c4aa764..d53f55b98bd23 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -80,7 +80,7 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testFieldValueFactor() throws IOException { + public void testFieldValueFactor() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -99,8 +99,8 @@ public void testFieldValueFactor() throws IOException { client().prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); client().prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); client().prepareIndex("test").setId("3").setSource("body", "bar").get(); - refresh(); + indexRandomForConcurrentSearch("test"); // document 2 scores higher because 17 > 5 SearchResponse response = client().prepareSearch("test") @@ -189,7 +189,7 @@ public void testFieldValueFactor() throws IOException { } } - public void testFieldValueFactorExplain() throws IOException { + public 
void testFieldValueFactorExplain() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -210,6 +210,7 @@ public void testFieldValueFactorExplain() throws IOException { client().prepareIndex("test").setId("3").setSource("body", "bar").get(); refresh(); + indexRandomForConcurrentSearch("test"); // document 2 scores higher because 17 > 5 final String functionName = "func1"; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java index 88395f25700d2..3b80d437e95c0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java @@ -126,10 +126,11 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { } } - public void testScriptScoresNested() throws IOException { + public void testScriptScoresNested() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -148,10 +149,11 @@ public void testScriptScoresNested() throws IOException { assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); } - public void testScriptScoresWithAgg() throws IOException { + public void testScriptScoresWithAgg() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -166,10 +168,11 @@ public void testScriptScoresWithAgg() throws IOException { assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } - public void testScriptScoresWithAggWithExplain() throws IOException { + public void testScriptScoresWithAggWithExplain() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -195,7 +198,7 @@ public void testScriptScoresWithAggWithExplain() throws IOException { assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } - public void testMinScoreFunctionScoreBasic() throws IOException { + public void testMinScoreFunctionScoreBasic() throws IOException, InterruptedException { float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, OpenSearchTestCase::randomFloat); float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, OpenSearchTestCase::randomFloat); index( @@ -207,6 +210,7 @@ public void testMinScoreFunctionScoreBasic() throws IOException { .endObject() ); refresh(); + indexRandomForConcurrentSearch(INDEX); ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap());
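The mock script names used above ("1", "get score value") resolve through the pluginScripts() override whose signature appears in the hunk header: Map<String, Function<Map<String, Object>, Object>> once the generics are spelled out. A minimal sketch of that pattern, assuming the test framework's org.opensearch.script.MockScriptPlugin plus java.util and java.util.function imports; the script bodies here are illustrative, not copied from the file:

public static class CustomScriptPlugin extends MockScriptPlugin {
    @Override
    protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
        Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
        scripts.put("1", vars -> 1.0d); // constant score, matching the inline source "1" used by scriptOne
        scripts.put("get score value", vars -> vars.get("_score")); // surfaces the incoming _score, used by scriptTwo
        return scripts;
    }
}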
@@ -291,6 +295,7 @@ public void testWithEmptyFunctions() throws IOException, ExecutionException, Int assertAcked(prepareCreate("test")); index("test", "testtype", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse termQuery = client().search(searchRequest().source(searchSource().explain(true).query(termQuery("text", "text")))) .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index 1df4acac0dcf0..a91f53dae04d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -122,6 +122,7 @@ public void testPlugin() throws Exception { ).actionGet(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); DecayFunctionBuilder gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d"); ActionFuture<SearchResponse> response = client().search( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java index de4c85301547c..bda6284d9535a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java @@ -110,7 +110,7 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testEnforceWindowSize() { + public void testEnforceWindowSize() throws InterruptedException { createIndex("test"); // this int iters = scaledRandomIntBetween(10, 20); @@ -118,6 +118,7 @@ public void testEnforceWindowSize() { client().prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numShards = getNumShards("test").numPrimaries; for (int j = 0; j < iters; j++) { @@ -169,6 +170,7 @@ public void testRescorePhrase() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) .setRescorer( @@ -474,6 +476,7 @@ private static void assertEquivalent(String query, SearchResponse plain, SearchR public void testEquivalence() throws Exception { // no dummy docs since merges can change scores while we run queries.
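// The call added throughout this PR, indexRandomForConcurrentSearch(...), comes from the
// integration-test base class. A minimal sketch of the idea, assuming (not copying) the
// framework implementation: when the cluster-level concurrent segment search setting is on,
// scatter bogus documents over several refresh cycles so each shard ends up with multiple
// segments, i.e. multiple slices for concurrent search, then delete them again so hit-count
// assertions are unaffected. This is also why many test signatures in this diff gain
// throws InterruptedException. The method name below is hypothetical.
protected void indexRandomForConcurrentSearchSketch(String... indices) throws InterruptedException {
    if (internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) == false) {
        return; // sequential search: extra segments are not needed
    }
    for (String index : indices) {
        List<String> bogusIds = new ArrayList<>();
        int rounds = randomIntBetween(2, 3);
        for (int round = 0; round < rounds; round++) {
            int docs = randomIntBetween(2, 10);
            for (int i = 0; i < docs; i++) {
                String id = "bogus_" + round + "_" + i;
                client().prepareIndex(index).setId(id).setSource("{}", MediaTypeRegistry.JSON).get();
                bogusIds.add(id);
            }
            refresh(index); // each refresh cuts at least one new segment
        }
        for (String id : bogusIds) {
            client().prepareDelete(index, id).get(); // leave the test's own documents untouched
        }
    }
}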
int numDocs = indexRandomNumbers("whitespace", -1, false); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(50, 100); for (int i = 0; i < iters; i++) { @@ -545,6 +548,7 @@ public void testExplain() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch() @@ -816,6 +820,7 @@ public void testFromSize() throws Exception { client().prepareIndex("test").setId("" + i).setSource("text", "hello world").get(); } refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder request = client().prepareSearch(); request.setQuery(QueryBuilders.termQuery("text", "hello")); @@ -832,6 +837,7 @@ public void testRescorePhaseWithInvalidSort() throws Exception { client().prepareIndex("test").setId("" + i).setSource("number", 0).get(); } refresh(); + indexRandomForConcurrentSearch("test"); Exception exc = expectThrows( Exception.class, diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 8f43cefd2d53b..69e30fc879dd8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -135,6 +135,7 @@ public void testConsistentHitsWithSameSeed() throws Exception { } flush(); refresh(); + indexRandomForConcurrentSearch("test"); int outerIters = scaledRandomIntBetween(10, 20); for (int o = 0; o < outerIters; o++) { final int seed = randomInt(); @@ -207,6 +208,7 @@ public void testScoreAccessWithinScript() throws Exception { .get(); } refresh(); + indexRandomForConcurrentSearch("test"); Map<String, Object> params = new HashMap<>(); params.put("factor", randomIntBetween(2, 4)); @@ -298,6 +300,7 @@ public void testSeedReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; @@ -317,6 +320,7 @@ public void testSeedAndNameReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; @@ -368,6 +372,7 @@ public void testScoreRange() throws Exception { } flush(); refresh(); + indexRandomForConcurrentSearch("test"); int iters = scaledRandomIntBetween(10, 20); for (int i = 0; i < iters; ++i) { SearchResponse searchResponse = client().prepareSearch() @@ -390,6 +395,7 @@ public void testSeeds() throws Exception { index("test", "type", "" + i, jsonBuilder().startObject().endObject()); } flushAndRefresh(); + indexRandomForConcurrentSearch("test"); assertNoFailures( client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index 00524c6e04707..ba519be04edff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -266,6 +266,7 @@ public void testShapeRelations() throws Exception { client().prepareIndex("shapes").setId("1").setSource(data, MediaTypeRegistry.JSON).get();
client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("shapes"); // Point in polygon SearchResponse result = client().prepareSearch() @@ -427,6 +428,7 @@ public void testBulk() throws Exception { client().admin().indices().prepareCreate("countries").setSettings(settings).setMapping(xContentBuilder).get(); BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, xContentBuilder.contentType()).get(); + indexRandomForConcurrentSearch("countries"); for (BulkItemResponse item : bulk.getItems()) { assertFalse("unable to index data", item.isFailed()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index 87435bb0bd09d..dc7c4e687c2fa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -125,6 +125,7 @@ public void testSimpleMoreLikeThis() throws Exception { client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() @@ -155,6 +156,7 @@ public void testSimpleMoreLikeThisWithTypes() throws Exception { client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() @@ -190,6 +192,7 @@ public void testMoreLikeThisForZeroTokensInOneOfTheAnalyzedFields() throws Excep ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -256,6 +259,7 @@ public void testMoreLikeThisWithAliases() throws Exception { client().index(indexRequest("test").id("4").source(jsonBuilder().startObject().field("text", "opensearch release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis on index"); SearchResponse response = client().prepareSearch() @@ -304,6 +308,7 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { client().index(indexRequest(indexName).id("3").source(jsonBuilder().startObject().field("text", "opensearch index").endObject())) .actionGet(); refresh(indexName); + indexRandomForConcurrentSearch(indexName); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1)) @@ -321,6 +326,7 @@ public void testMoreLikeThisIssue2197() throws Exception { .get(); client().admin().indices().prepareRefresh("foo").get(); assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1") })) @@ -344,6 +350,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { .setRouting("2") 
.get(); client().admin().indices().prepareRefresh("foo").get(); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("2") })) @@ -368,6 +375,7 @@ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { .setRouting("4000") .get(); client().admin().indices().prepareRefresh("foo").get(); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("4000") })) .get(); @@ -401,6 +409,7 @@ public void testNumericField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Implicit list of fields -> ignore numeric fields SearchResponse searchResponse = client().prepareSearch() @@ -508,6 +517,7 @@ public void testMoreLikeThisWithFieldAlias() throws Exception { index("test", "_doc", "1", "text", "lucene"); index("test", "_doc", "2", "text", "lucene release"); refresh(); + indexRandomForConcurrentSearch("test"); Item item = new Item("test", "1"); QueryBuilder query = QueryBuilders.moreLikeThisQuery(new String[] { "alias" }, null, new Item[] { item }) @@ -548,6 +558,7 @@ public void testSimpleMoreLikeInclude() throws Exception { .source(jsonBuilder().startObject().field("text", "Lucene has been ported to other programming languages").endObject()) ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running More Like This with include true"); SearchResponse response = client().prepareSearch() @@ -832,11 +843,12 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt assertHitCount(response, 1); } - public void testWithRouting() throws IOException { + public void testWithRouting() throws IOException, InterruptedException { client().prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); client().prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); client().prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); refresh("index"); + indexRandomForConcurrentSearch("index"); Item item = new Item("index", "2").routing("1"); MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = new MoreLikeThisQueryBuilder( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index bc1d2833ecbbf..b35208941d2a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -71,12 +71,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSimpleMultiSearch() { + public void testSimpleMultiSearch() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiSearchResponse response = client().prepareMultiSearch() .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) 
.add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) @@ -94,13 +95,14 @@ public void testSimpleMultiSearch() { assertFirstHit(response.getResponses()[1].getResponse(), hasId("2")); } - public void testSimpleMultiSearchMoreRequests() { + public void testSimpleMultiSearchMoreRequests() throws InterruptedException { createIndex("test"); int numDocs = randomIntBetween(0, 16); for (int i = 0; i < numDocs; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numSearchRequests = randomIntBetween(1, 64); MultiSearchRequest request = new MultiSearchRequest(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 83dec7b27a897..8b375841c2913 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -126,6 +126,7 @@ public void testSimpleNested() throws Exception { .get(); waitForRelocation(ClusterHealthStatus.GREEN); + indexRandomForConcurrentSearch("test"); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsBytes(), notNullValue()); @@ -293,6 +294,7 @@ public void testMultiNested() throws Exception { refresh(); // check the numDocs assertDocumentCount("test", 7); + indexRandomForConcurrentSearch("test"); // do some multi nested queries SearchResponse searchResponse = client().prepareSearch("test") @@ -485,6 +487,7 @@ public void testExplain() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) @@ -498,6 +501,10 @@ public void testExplain() throws Exception { } public void testSimpleNestedSorting() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.refresh_interval", -1)) .setMapping( @@ -567,6 +574,7 @@ public void testSimpleNestedSorting() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -596,6 +604,10 @@ public void testSimpleNestedSorting() throws Exception { } public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.refresh_interval", -1)) .setMapping( @@ -675,6 +687,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test") 
.setQuery(QueryBuilders.matchAllQuery()) @@ -727,6 +740,10 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { } public void testNestedSortWithMultiLevelFiltering() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "{\n" @@ -863,6 +880,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // access id = 1, read, max value, asc, should use grault and quxx SearchResponse searchResponse = client().prepareSearch() @@ -968,6 +986,10 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)) .setMapping( @@ -1035,6 +1057,7 @@ public void testLeakingSortValues() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(termQuery("_id", 2)) @@ -1056,6 +1079,10 @@ public void testLeakingSortValues() throws Exception { } public void testSortNestedWithNestedFilter() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1215,6 +1242,7 @@ public void testSortNestedWithNestedFilter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Without nested filter SearchResponse searchResponse = client().prepareSearch() @@ -1453,6 +1481,10 @@ public void testSortNestedWithNestedFilter() throws Exception { // Issue #9305 public void testNestedSortingWithNestedFilterAsFilter() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1595,6 +1627,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { .get(); assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .addSort(SortBuilders.fieldSort("users.first").setNestedPath("users").order(SortOrder.ASC)) @@ -1627,6 +1660,7 @@ public void testCheckFixedBitSetCache() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); ensureSearchable("test"); + indexRandomForConcurrentSearch("test"); // No nested mapping yet, there shouldn't be anything in the fixed bit set cache ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 4a178e7066846..8ae652082f653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -309,7 +309,11 @@ public void testDeleteWhileSearch() throws Exception { private void verifySearchContextMissingException(ShardSearchFailure[] failures) { for (ShardSearchFailure failure : failures) { Throwable cause = ExceptionsHelper.unwrapCause(failure.getCause()); - assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + if (failure.toString().contains("reader_context is already closed can't increment refCount current count")) { + // this is fine, expected search error when context is already deleted + } else { + assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index e42f12709c948..a3432bfe7e3e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -100,6 +100,7 @@ public void clearIndex() { public void testPit() throws Exception { CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + indexRandomForConcurrentSearch("index"); ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); SearchResponse searchResponse = client().prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 425764b1c88d2..6e40c08ed08a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -52,7 +52,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -99,7 +98,7 @@ public Settings nodeSettings(int nodeOrdinal) { } // see #2896 - public void testStopOneNodePreferenceWithRedState() throws IOException { + public void testStopOneNodePreferenceWithRedState() throws Exception { assertAcked( prepareCreate("test").setSettings( Settings.builder().put("index.number_of_shards", cluster().numDataNodes() + 2).put("index.number_of_replicas", 0) @@ -110,6 +109,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); String[] preferences = new String[] { @@ -138,7 +138,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); } -
public void testNoPreferenceRandom() { + public void testNoPreferenceRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -149,6 +149,7 @@ public void testNoPreferenceRandom() { client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -159,12 +160,13 @@ public void testNoPreferenceRandom() { assertThat(firstNodeId, not(equalTo(secondNodeId))); } - public void testSimplePreference() { + public void testSimplePreference() throws InterruptedException { client().admin().indices().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", MediaTypeRegistry.JSON).get(); ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -201,7 +203,7 @@ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() { } } - public void testNodesOnlyRandom() { + public void testNodesOnlyRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -211,6 +213,7 @@ public void testNodesOnlyRandom() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); // multiple wildchar to cover multi-param usecase @@ -262,7 +265,7 @@ private void assertSearchOnRandomNodes(SearchRequestBuilder request) { assertThat(hitNodes.size(), greaterThan(1)); } - public void testCustomPreferenceUnaffectedByOtherShardMovements() { + public void testCustomPreferenceUnaffectedByOtherShardMovements() throws InterruptedException { /* * Custom preferences can be used to encourage searches to go to a consistent set of shard copies, meaning that other copies' data @@ -281,6 +284,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final String customPreference = randomAlphaOfLength(10); @@ -300,6 +304,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { prepareCreate("test2").setSettings(Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex)) ); ensureGreen(); + indexRandomForConcurrentSearch("test2"); assertSearchesSpecificNode("test", customPreference, nodeId); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 5f794d2abf878..ef73438114079 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.profile.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import 
org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -40,20 +42,23 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -61,8 +66,32 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { + private final boolean concurrentSearchEnabled; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + + public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { + super(settings); + this.concurrentSearchEnabled = concurrentSearchEnabled; + } -public class QueryProfilerIT extends OpenSearchIntegTestCase { + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * This test simply checks to make sure nothing crashes.
Test indexes 100-150 documents, @@ -229,6 +258,7 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -271,6 +301,7 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); + assertQueryProfileResult(result); // Check the children List children = result.getProfiledChildren(); @@ -282,12 +313,14 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); + assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); + assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -330,6 +363,7 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -375,6 +409,7 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -415,6 +450,90 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testSearchLeafForItsLeavesAndRewriteQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = 122; + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + } + + List<String> terms = Arrays.asList("zero", "zero", "one"); + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery( + QueryBuilders.idsQuery().addIds(String.valueOf(randomInt()), String.valueOf(randomInt())), + QueryBuilders.termsQuery("field1", terms) + ).boost(randomFloat()).negativeBoost(randomFloat()); + logger.info("Query: {}", q); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
assertThat(shardResult.getValue().getNetworkTime().getInboundNetworkTime(), greaterThanOrEqualTo(0L)); + assertThat(shardResult.getValue().getNetworkTime().getOutboundNetworkTime(), greaterThanOrEqualTo(0L)); + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + List<ProfileResult> results = searchProfiles.getQueryResults(); + for (ProfileResult result : results) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + Map<String, Long> breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled && results.get(0).equals(result)) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else if (concurrentSearchEnabled) { + assertThat(maxSliceTime, equalTo(0L)); + assertThat(minSliceTime, equalTo(0L)); + assertThat(avgSliceTime, equalTo(0L)); + assertThat(breakdown.size(), equalTo(27)); + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } } CollectorResult result = searchProfiles.getCollectorResult(); @@ -455,6 +574,7 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -494,6 +614,7 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -547,6 +668,7 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -579,4 +701,35 @@ public void testNoProfile() throws Exception { assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } + private void assertQueryProfileResult(ProfileResult result) { + Map<String, Long> breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + }
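One note on the breakdown assertions above: under concurrent search, every timing type except create_weight is expected to expose max_/min_/avg_ slice statistics plus matching _count entries on top of the base keys, which is what the 66-versus-27 key-count assertions are checking. Purely as an illustration, assuming QueryTimingType.SCORE renders as "score" when concatenated, the derived keys checked for that one timing type would be:

String[] derivedScoreKeys = {
    "max_score", "min_score", "avg_score", // per-slice timing statistics
    "max_score_count", "min_score_count", "avg_score_count", // how often each slice hit the timing hook
};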
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 099eb934f4f4d..1ca5859f23bca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -186,6 +186,7 @@ public void testDocWithAllTypes() throws Exception { String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -225,6 +226,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -245,6 +247,7 @@ public void testRegexCaseInsensitivity() throws Exception { indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake")); indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("messages"); SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get(); assertNoFailures(response); @@ -282,6 +285,7 @@ public void testAllFields() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test_1"); SearchResponse resp = client().prepareSearch("test_1") .setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)) @@ -374,6 +378,7 @@ public void testLimitOnExpandedFields() throws Exception { client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("testindex"); // single field shouldn't trigger the limit doAssertOneHitForQueryString("field_A0:foo"); @@ -465,6 +470,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception
{ List<IndexRequestBuilder> indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index 7ba582811bbc2..55029712a061c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -109,13 +109,14 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { // 1) only matched docs retrieved // 2) score is calculated based on a script with params // 3) min score applied - public void testScriptScore() { + public void testScriptScore() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); @@ -135,13 +136,14 @@ public void testScriptScore() { assertOrderedSearchHits(resp, "10", "8", "6"); } - public void testScriptScoreBoolQuery() { + public void testScriptScoreBoolQuery() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); @@ -155,7 +157,7 @@ public void testScriptScoreBoolQuery() { } // test that when the internal query is rewritten script_score works well - public void testRewrittenQuery() { + public void testRewrittenQuery() throws Exception { assertAcked( prepareCreate("test-index2").setSettings(Settings.builder().put("index.number_of_shards", 1)) .setMapping("field1", "type=date", "field2", "type=double") @@ -164,6 +166,7 @@ public void testRewrittenQuery() { client().prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); client().prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); refresh(); + indexRandomForConcurrentSearch("test-index2"); RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Map<String, Object> params = new HashMap<>(); @@ -174,7 +177,7 @@ public void testRewrittenQuery() { assertOrderedSearchHits(resp, "3", "2", "1"); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws Exception { try { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; @@ -182,6 +185,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); diff --git
a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 53bded1fc493c..03312c6e1e2f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -196,7 +196,7 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti } // see https://github.com/elastic/elasticsearch/issues/3177 - public void testIssue3177() { + public void testIssue3177() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); @@ -205,6 +205,7 @@ public void testIssue3177() { waitForRelocation(); forceMerge(); refresh(); + indexRandomForConcurrentSearch("test"); assertHitCount( client().prepareSearch() .setQuery(matchAllQuery()) @@ -394,7 +395,19 @@ public void testCommonTermsQuery() throws Exception { assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get(); + // cutoff frequency of 1 makes all terms high frequency so the query gets rewritten as a + // conjunction of all terms (the lowFreqMinimumShouldMatch parameter is effectively ignored) + searchResponse = client().prepareSearch() + .setQuery(commonTermsQuery("field1", "the huge fox").cutoffFrequency(1).lowFreqMinimumShouldMatch("2")) + .get(); + assertHitCount(searchResponse, 1L); + assertFirstHit(searchResponse, hasId("2")); + + // cutoff frequency of 100 makes all terms low frequency, so lowFreqMinimumShouldMatch=3 + // means all terms must match + searchResponse = client().prepareSearch() + .setQuery(commonTermsQuery("field1", "the huge fox").cutoffFrequency(100).lowFreqMinimumShouldMatch("3")) + .get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("2")); @@ -465,6 +478,7 @@ public void testQueryStringAnalyzedWildcard() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*")).get(); assertHitCount(searchResponse, 1L); @@ -482,11 +496,12 @@ public void testQueryStringAnalyzedWildcard() throws Exception { assertHitCount(searchResponse, 1L); } - public void testLowercaseExpandedTerms() { + public void testLowercaseExpandedTerms() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1")).get(); assertHitCount(searchResponse, 1L); @@ -499,7 +514,7 @@ public void testLowercaseExpandedTerms() { } // Issue #3540 - public void testDateRangeInQueryString() { + public void testDateRangeInQueryString() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date", "future", "type=date")); @@ -510,6 +525,7 
@@ public void testDateRangeInQueryString() { client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertHitCount(searchResponse, 1L); @@ -525,7 +541,7 @@ public void testDateRangeInQueryString() { } // Issue #7880 - public void testDateRangeInQueryStringWithTimeZone_7880() { + public void testDateRangeInQueryStringWithTimeZone_7880() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -536,6 +552,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { client().prepareIndex("test").setId("1").setSource("past", now).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone(timeZone.getId())) .get(); @@ -543,7 +560,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { } // Issue #10477 - public void testDateRangeInQueryStringWithTimeZone_10477() { + public void testDateRangeInQueryStringWithTimeZone_10477() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -552,6 +569,7 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { client().prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Timezone set with dates SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")) @@ -725,6 +743,7 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1L); @@ -741,6 +760,7 @@ public void testFiltersWithCustomCacheKey() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get(); assertHitCount(searchResponse, 1L); @@ -782,6 +802,7 @@ public void testMatchQueryFuzzy() throws Exception { client().prepareIndex("test").setId("1").setSource("text", "Unit"), client().prepareIndex("test").setId("2").setSource("text", "Unity") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.ZERO)).get(); assertHitCount(searchResponse, 0L); @@ -868,11 +889,12 @@ public void testMultiMatchQuery() throws Exception { assertFirstHit(searchResponse, hasId("1")); } - public void testMatchQueryZeroTermsQuery() { + public void 
testMatchQueryZeroTermsQuery() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE)) .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE)); @@ -889,11 +911,12 @@ public void testMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryZeroTermsQuery() { + public void testMultiMatchQueryZeroTermsQuery() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); client().prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must( multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE) @@ -913,11 +936,12 @@ public void testMultiMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryMinShouldMatch() { + public void testMultiMatchQueryMinShouldMatch() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); @@ -959,12 +983,13 @@ public void testMultiMatchQueryMinShouldMatch() { assertHitCount(searchResponse, 0L); } - public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException { + public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get(); @@ -991,12 +1016,13 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws assertHitCount(searchResponse, 0L); } - public void testFuzzyQueryString() { + public void testFuzzyQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("str:foobaz~1")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1L); @@ -1015,6 
+1041,7 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")) .get(); @@ -1027,11 +1054,12 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { ); } - public void testSpecialRangeSyntaxInQueryString() { + public void testSpecialRangeSyntaxInQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>19")).get(); assertHitCount(searchResponse, 1L); @@ -1137,6 +1165,7 @@ public void testTermsQuery() throws Exception { public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); + indexRandomForConcurrentSearch("lookup"); assertAcked( prepareCreate("lookup2").setMapping( jsonBuilder().startObject() @@ -1152,8 +1181,11 @@ public void testTermsLookupFilter() throws Exception { .endObject() ) ); + indexRandomForConcurrentSearch("lookup2"); assertAcked(prepareCreate("lookup3").setMapping("_source", "enabled=false", "terms", "type=text")); + indexRandomForConcurrentSearch("lookup3"); assertAcked(prepareCreate("test").setMapping("term", "type=text")); + indexRandomForConcurrentSearch("test"); indexRandom( true, @@ -1279,6 +1311,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1333,6 +1366,7 @@ public void testNumericTermsAndRanges() throws Exception { .setSource("num_byte", 17, "num_short", 17, "num_integer", 17, "num_long", 17, "num_float", 17, "num_double", 17) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse; logger.info("--> term query on 1"); @@ -1439,6 +1473,7 @@ public void testNumericRangeFilter_2826() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get(); client().prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setPostFilter(boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4))) @@ -1535,7 +1570,7 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted assertHitCount(searchResponse, 3L); } - public void testSpanMultiTermQuery() throws IOException { + public void testSpanMultiTermQuery() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get(); @@ -1543,6 +1578,7 @@ public void testSpanMultiTermQuery() throws 
IOException { client().prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get(); client().prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))) @@ -1574,6 +1610,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc client().prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); client().prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery( @@ -1612,7 +1649,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc assertHitCount(searchResponse, 1L); } - public void testSimpleDFSQuery() throws IOException { + public void testSimpleDFSQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1657,6 +1694,7 @@ public void testSimpleDFSQuery() throws IOException { .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1679,8 +1717,9 @@ public void testSimpleDFSQuery() throws IOException { assertNoFailures(response); } - public void testMultiFieldQueryString() { + public void testMultiFieldQueryString() throws InterruptedException { client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); logger.info("regular"); assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")).get(), 1); @@ -1700,11 +1739,12 @@ public void testMultiFieldQueryString() { } // see #3797 - public void testMultiMatchLenientIssue3797() { + public void testMultiMatchLenientIssue3797() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)) @@ -1728,6 +1768,7 @@ public void testMinScore() throws ExecutionException, InterruptedException { client().prepareIndex("test").setId("3").setSource("score", 2.0).get(); client().prepareIndex("test").setId("4").setSource("score", 0.5).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f)) @@ -1737,12 +1778,13 @@ public void testMinScore() throws ExecutionException, InterruptedException { assertSecondHit(searchResponse, hasId("1")); } - public void testQueryStringWithSlopAndFields() { + public void testQueryStringWithSlopAndFields() throws InterruptedException { assertAcked(prepareCreate("test")); client().prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get(); 
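For orientation, since the same two-line change repeats through every test file in this section: each hunk adds an indexRandomForConcurrentSearch(...) call between refresh() and the first search, and widens the test signature with throws InterruptedException because the helper can block. A minimal sketch of the resulting test shape, using only names that already appear in this diff; the test name itself is hypothetical:

```java
// Hypothetical example of the pattern applied throughout these hunks.
// createIndex, refresh, indexRandomForConcurrentSearch and assertHitCount are
// assumed to come from the shared OpenSearch integration-test base class.
public void testExampleWithConcurrentSearch() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test").setId("1").setSource("field1", "value1").get();
    refresh();                              // make the document visible to search
    indexRandomForConcurrentSearch("test"); // set up segments for the concurrent search path
    SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get();
    assertHitCount(searchResponse, 1L);
}
```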
client().prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")) @@ -1809,6 +1851,7 @@ public void testRangeQueryWithTimeZone() throws Exception { .setId("4") .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")) @@ -1916,11 +1959,12 @@ public void testRangeQueryWithLocaleMapping() throws Exception { assertHitCount(searchResponse, 2L); } - public void testSearchEmptyDoc() { + public void testSearchEmptyDoc() throws InterruptedException { assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}", MediaTypeRegistry.JSON)); client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); refresh(); + indexRandomForConcurrentSearch("test"); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } @@ -1948,6 +1992,7 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE public void testQueryStringParserCache() throws Exception { createIndex("test"); indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1978,6 +2023,7 @@ public void testRangeQueryRangeFields_24744() throws Exception { .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); @@ -2013,6 +2059,7 @@ public void testNestedQueryWithFieldAlias() throws Exception { index("index", "_doc", "1", source); refresh(); + indexRandomForConcurrentSearch("index"); QueryBuilder nestedQuery = QueryBuilders.nestedQuery( "section", @@ -2041,6 +2088,7 @@ public void testFieldAliasesForMetaFields() throws Exception { IndexRequestBuilder indexRequest = client().prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value"); indexRandom(true, false, indexRequest); + indexRandomForConcurrentSearch("test"); client().admin() .cluster() .prepareUpdateSettings() @@ -2073,7 +2121,7 @@ public void testFieldAliasesForMetaFields() throws Exception { /** * Test that wildcard queries on keyword fields get normalized */ - public void testWildcardQueryNormalizationOnKeywordField() { + public void testWildcardQueryNormalizationOnKeywordField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2084,6 +2132,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); @@ -2099,7 +2148,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { /** * Test that 
wildcard queries on text fields get normalized */ - public void testWildcardQueryNormalizationOnTextField() { + public void testWildcardQueryNormalizationOnTextField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2111,6 +2160,7 @@ public void testWildcardQueryNormalizationOnTextField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { // test default case insensitivity: false @@ -2130,10 +2180,11 @@ public void testWildcardQueryNormalizationOnTextField() { } /** tests wildcard case sensitivity */ - public void testWildcardCaseSensitivity() { + public void testWildcardCaseSensitivity() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field", "type=text")); client().prepareIndex("test").setId("1").setSource("field", "lowercase text").get(); refresh(); + indexRandomForConcurrentSearch("test"); // test case sensitive SearchResponse response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(false)).get(); @@ -2151,7 +2202,7 @@ public void testWildcardCaseSensitivity() { * Reserved characters should be excluded when the normalization is applied for keyword fields. * See https://github.com/elastic/elasticsearch/issues/46300 for details. */ - public void testWildcardQueryNormalizationKeywordSpecialCharacters() { + public void testWildcardQueryNormalizationKeywordSpecialCharacters() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2163,6 +2214,7 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() { ); client().prepareIndex("test").setId("1").setSource("field", "label-1").get(); refresh(); + indexRandomForConcurrentSearch("test"); WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*"); SearchResponse searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); @@ -2213,11 +2265,12 @@ public Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> getTokenizers() { * set for fuzzy queries with "constant_score" rewrite nested inside a `span_multi` query and would cause NPEs due to an unset * {@link AttributeSource}.
*/ - public void testIssueFuzzyInsideSpanMulti() { + public void testIssueFuzzyInsideSpanMulti() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field", "foobarbaz").get(); ensureGreen(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder query = boolQuery().filter(spanMultiTermQueryBuilder(fuzzyQuery("field", "foobarbiz").rewrite("constant_score"))); SearchResponse response = client().prepareSearch("test").setQuery(query).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 384d2b7423e66..d8902238005da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -150,6 +150,7 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar")).get(); assertHitCount(searchResponse, 3L); @@ -199,6 +200,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("3").setSource("body", "foo bar"), client().prepareIndex("test").setId("4").setSource("body", "foo baz bar") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 1"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get(); @@ -235,6 +237,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"), client().prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 5"); searchResponse = client().prepareSearch() @@ -256,7 +259,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { assertSearchHits(searchResponse, "6", "7", "8"); } - public void testNestedFieldSimpleQueryString() throws IOException { + public void testNestedFieldSimpleQueryString() throws Exception { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -275,6 +278,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { ); client().prepareIndex("test").setId("1").setSource("body", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")).get(); assertHitCount(searchResponse, 1L); @@ -359,6 +363,8 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte client().prepareIndex("test2").setId("10").setSource("field", 5) ); refresh(); + indexRandomForConcurrentSearch("test1"); + indexRandomForConcurrentSearch("test2"); SearchResponse searchResponse = client().prepareSearch() .setAllowPartialSearchResults(true) @@ -419,6 +425,7 @@ public void testSimpleQueryStringUsesFieldAnalyzer() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); 
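The SimpleQueryStringIT hunks above revolve around minimum_should_match; a sketch of the behavior those tests assert, against a hypothetical index whose documents contain "foo", "foo bar" and "foo baz bar":

```java
// Sketch only: with minimumShouldMatch("2"), a document must match at least
// two of the three query terms, so "foo" alone no longer qualifies.
SearchResponse searchResponse = client().prepareSearch("test")
    .setQuery(simpleQueryStringQuery("foo bar baz").minimumShouldMatch("2"))
    .get();
assertHitCount(searchResponse, 2L); // "foo bar" and "foo baz bar"
```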
SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("123").field("foo").field("bar")).get(); assertHitCount(searchResponse, 1L); @@ -430,6 +437,7 @@ public void testSimpleQueryStringOnIndexMetaField() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("test").field("_index")).get(); assertHitCount(searchResponse, 2L); @@ -469,6 +477,7 @@ public void testBasicAllQuery() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHitCount(resp, 2L); @@ -492,6 +501,7 @@ public void testWithDate() throws Exception { reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -523,6 +533,7 @@ public void testWithLotsOfTypes() throws Exception { client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") ); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -550,6 +561,7 @@ public void testDocWithAllTypes() throws Exception { String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -596,6 +608,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -663,6 +676,7 @@ public void testFieldAlias() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get(); @@ -681,6 +695,7 @@ public void testFieldAliasWithWildcardField() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", 
"f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get(); @@ -697,6 +712,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { List indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and boolean field. // By default, the boolean field should be ignored when building the query. diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index 34967528f2c4f..ae00904f237a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -161,6 +161,7 @@ public void testCustomScriptBinaryField() throws Exception { .get(); flush(); refresh(); + indexRandomForConcurrentSearch("my-index"); SearchResponse response = client().prepareSearch() .setQuery( @@ -213,6 +214,7 @@ public void testCustomScriptBoost() throws Exception { .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value > 1"); SearchResponse response = client().prepareSearch() @@ -259,7 +261,7 @@ public void testCustomScriptBoost() throws Exception { assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws InterruptedException { try { assertAcked(prepareCreate("test-index").setMapping("num1", "type=double")); int docCount = 10; @@ -267,6 +269,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); // Execute with search.allow_expensive_queries = null => default value = false => success Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index 0eee136acac69..b82048ffc924e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -135,6 +135,7 @@ public void testSimpleScrollQueryThenFetch() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -188,6 +189,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.QUERY_THEN_FETCH) @@ -256,6 +258,7 @@ public void testScrollAndUpdateIndex() throws Exception { } 
client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); assertThat(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, equalTo(500L)); assertThat( @@ -328,6 +331,7 @@ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse1 = client().prepareSearch() .setQuery(matchAllQuery()) @@ -448,6 +452,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse1 = client().prepareSearch() .setQuery(matchAllQuery()) @@ -526,6 +531,7 @@ public void testDeepScrollingDoesNotBlowUp() throws Exception { .prepareUpdateSettings("index") .setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)) .get(); + indexRandomForConcurrentSearch("index"); for (SearchType searchType : SearchType.values()) { SearchRequestBuilder builder = client().prepareSearch("index") @@ -567,6 +573,7 @@ public void testStringSortMissingAscTerminates() throws Exception { ); client().prepareIndex("test").setId("1").setSource("some_field", "test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_last")) @@ -592,12 +599,13 @@ public void testStringSortMissingAscTerminates() throws Exception { assertThat(response.getHits().getHits().length, equalTo(0)); } - public void testCloseAndReopenOrDeleteWithActiveScroll() { + public void testCloseAndReopenOrDeleteWithActiveScroll() throws InterruptedException { createIndex("test"); for (int i = 0; i < 100; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(35) @@ -683,7 +691,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { assertThat(exc.getMessage(), containsString("was (1m > 30s)")); } - public void testInvalidScrollKeepAlive() throws IOException { + public void testInvalidScrollKeepAlive() throws IOException, InterruptedException { createIndex("test"); for (int i = 0; i < 2; i++) { client().prepareIndex("test") @@ -692,6 +700,7 @@ public void testInvalidScrollKeepAlive() throws IOException { .get(); } refresh(); + indexRandomForConcurrentSearch("test"); assertAcked( client().admin() .cluster() @@ -733,7 +742,7 @@ public void testInvalidScrollKeepAlive() throws IOException { * Ensures that we always create and retain search contexts on every target shards for a scroll request * regardless whether that query can be written to match_no_docs on some target shards or not. 
*/ - public void testScrollRewrittenToMatchNoDocs() { + public void testScrollRewrittenToMatchNoDocs() throws InterruptedException { final int numShards = randomIntBetween(3, 5); assertAcked( client().admin() @@ -746,6 +755,7 @@ public void testScrollRewrittenToMatchNoDocs() { client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = null; try { int totalHits = 0; @@ -793,6 +803,7 @@ public void testRestartDataNodesDuringScrollSearch() throws Exception { index("prod", "_doc", "prod-" + i, Collections.emptyMap()); } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("demo", "prod"); SearchResponse respFromDemoIndex = client().prepareSearch("demo") .setSize(randomIntBetween(1, 10)) .setQuery(new MatchAllQueryBuilder()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index bee242b933dfd..81e948640ee94 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -154,7 +154,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); } - public void testIssue8226() { + public void testIssue8226() throws InterruptedException { int numIndices = between(5, 10); final boolean useMapping = randomBoolean(); for (int i = 0; i < numIndices; i++) { @@ -168,6 +168,9 @@ public void testIssue8226() { } } refresh(); + for (int i = 0; i < numIndices; i++) { + indexRandomForConcurrentSearch("test_" + i); + } // sort DESC SearchResponse searchResponse = client().prepareSearch() .addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? 
null : "long")) @@ -278,6 +281,7 @@ public void testTrackScores() throws Exception { jsonBuilder().startObject().field("id", "2").field("svalue", "bbb").field("ivalue", 200).field("dvalue", 0.2).endObject() ); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); @@ -446,6 +450,7 @@ public void testScoreSortDirection() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))) @@ -484,6 +489,7 @@ public void testScoreSortDirectionWithFunctionScore() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) @@ -531,7 +537,7 @@ public void testIssue2986() { } } - public void testIssue2991() { + public void testIssue2991() throws InterruptedException { for (int i = 1; i < 4; i++) { try { client().admin().indices().prepareDelete("test").get(); @@ -552,6 +558,7 @@ public void testIssue2991() { client().prepareIndex("test").setId("2").setSource("tag", "beta").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test") .setSize(2) .setQuery(matchAllQuery()) @@ -605,6 +612,9 @@ public void testSimpleSorts() throws Exception { .startObject("float_value") .field("type", "float") .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() .startObject("double_value") .field("type", "double") .endObject() @@ -628,6 +638,7 @@ public void testSimpleSorts() throws Exception { .field("long_value", i) .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) .field("float_value", 0.1 * i) + .field("half_float_value", 0.1 * i) .field("double_value", 0.1 * i) .endObject() ); @@ -646,6 +657,7 @@ public void testSimpleSorts() throws Exception { } refresh(); + indexRandomForConcurrentSearch("test"); // STRING int size = 1 + random.nextInt(10); @@ -794,6 +806,28 @@ public void testSimpleSorts() throws Exception { assertThat(searchResponse.toString(), not(containsString("error"))); + // HALF_FLOAT + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, 10L); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, 10); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + // DOUBLE size = 1 + random.nextInt(10); searchResponse 
= client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); @@ -896,6 +930,7 @@ public void testSortMissingNumbers() throws Exception { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // DOUBLE logger.info("--> sort with no missing (same as missing _last)"); @@ -1057,6 +1092,7 @@ public void testSortMissingNumbersMinMax() throws Exception { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // LONG logger.info("--> sort with no missing (same as missing _last)"); @@ -1176,7 +1212,7 @@ public void testSortMissingNumbersMinMax() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); } - public void testSortMissingStrings() throws IOException { + public void testSortMissingStrings() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1204,6 +1240,7 @@ public void testSortMissingStrings() throws IOException { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // TODO: WTF? try { @@ -1330,6 +1367,9 @@ public void testSortMVField() throws Exception { .startObject("float_values") .field("type", "float") .endObject() + .startObject("half_float_values") + .field("type", "half_float") + .endObject() .startObject("double_values") .field("type", "double") .endObject() @@ -1351,6 +1391,7 @@ public void testSortMVField() throws Exception { .array("short_values", 1, 5, 10, 8) .array("byte_values", 1, 5, 10, 8) .array("float_values", 1f, 5f, 10f, 8f) + .array("half_float_values", 1f, 5f, 10f, 8f) .array("double_values", 1d, 5d, 10d, 8d) .array("string_values", "01", "05", "10", "08") .endObject() @@ -1365,6 +1406,7 @@ public void testSortMVField() throws Exception { .array("short_values", 11, 15, 20, 7) .array("byte_values", 11, 15, 20, 7) .array("float_values", 11f, 15f, 20f, 7f) + .array("half_float_values", 11f, 15f, 20f, 7f) .array("double_values", 11d, 15d, 20d, 7d) .array("string_values", "11", "15", "20", "07") .endObject() @@ -1379,6 +1421,7 @@ public void testSortMVField() throws Exception { .array("short_values", 2, 1, 3, -4) .array("byte_values", 2, 1, 3, -4) .array("float_values", 2f, 1f, 3f, -4f) + .array("half_float_values", 2f, 1f, 3f, -4f) .array("double_values", 2d, 1d, 3d, -4d) .array("string_values", "02", "01", "03", "!4") .endObject() @@ -1386,6 +1429,7 @@ public void testSortMVField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1585,6 +1629,34 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.ASC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f)); + +
assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.DESC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); + + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); @@ -1642,7 +1714,7 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); } - public void testSortOnRareField() throws IOException { + public void testSortOnRareField() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1662,6 +1734,7 @@ public void testSortOnRareField() throws IOException { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(3) @@ -1757,6 +1830,7 @@ public void testSortMetaField() throws Exception { indexReqs[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource(); } indexRandom(true, indexReqs); + indexRandomForConcurrentSearch("test"); SortOrder order = randomFrom(SortOrder.values()); SearchResponse searchResponse = client().prepareSearch() @@ -1859,6 +1933,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // We sort on nested field SearchResponse searchResponse = client().prepareSearch() @@ -2044,6 +2119,7 @@ public void testFieldAlias() throws Exception { builders.add(client().prepareIndex("old_index").setSource("distance", 50.5)); builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("old_index", "new_index"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -2070,6 +2146,7 @@ public void testFieldAliasesWithMissingValues() throws Exception { builders.add(client().prepareIndex("old_index").setSource(Collections.emptyMap())); builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("old_index", "new_index"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -2139,6 +2216,7 @@ public void testCastDate() throws Exception { builders.add(client().prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17")); builders.add(client().prepareIndex("index_date_nanos").setSource("field", 
"2024-04-11T23:47:16.854775807Z")); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("index_date", "index_date_nanos"); { SearchResponse response = client().prepareSearch() @@ -2264,7 +2342,7 @@ public void testCastNumericTypeExceptions() throws Exception { } } - public void testLongSortOptimizationCorrectResults() { + public void testLongSortOptimizationCorrectResults() throws InterruptedException { assertAcked( prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)) .setMapping("long_field", "type=long") @@ -2281,6 +2359,7 @@ public void testLongSortOptimizationCorrectResults() { bulkBuilder.add(client().prepareIndex("test1").setId(Integer.toString(i)).setSource(source, MediaTypeRegistry.JSON)); } refresh(); + indexRandomForConcurrentSearch("test1"); // *** 1. sort DESC on long_field SearchResponse searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 6886f8d67589e..766ac6139b24b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -192,6 +192,7 @@ public void testDistanceSortingMVFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test") @@ -324,6 +325,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test") @@ -602,7 +604,7 @@ public void testDistanceSortingNestedFields() throws Exception { /** * Issue 3073 */ - public void testGeoDistanceFilter() throws IOException { + public void testGeoDistanceFilter() throws IOException, InterruptedException { Version version = VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; @@ -620,6 +622,7 @@ public void testGeoDistanceFilter() throws IOException { assertAcked(prepareCreate("locations").setSettings(settings).setMapping(mapping)); client().prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); refresh(); + indexRandomForConcurrentSearch("locations"); client().prepareGet("locations", "1").get(); SearchResponse result = client().prepareSearch("locations") @@ -668,6 +671,7 @@ public void testDistanceSortingWithUnmappedField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test1", "test2"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test1", "test2") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 7bcded86fcaa8..76e68781c72ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -59,6 +59,7 @@ public void testPluginSort() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = 
client().prepareSearch("test").addSort(new CustomSortBuilder("field", SortOrder.ASC)).get(); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("3")); @@ -80,6 +81,7 @@ public void testPluginSortXContent() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); // builder -> json -> builder SearchResponse searchResponse = client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index c98a38ea0bb97..a9c4bf841d9a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -76,12 +76,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSimple() { + public void testSimple() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); @@ -93,12 +94,12 @@ public void testSimple() { assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); } - public void testInnerHits() { + public void testInnerHits() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("nested", "type=nested")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get(); refresh(); - + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .storedFields("_none_") .setFetchSource(false) @@ -119,12 +120,13 @@ public void testInnerHits() { assertThat(hits.getAt(0).getSourceAsString(), nullValue()); } - public void testWithRouting() { + public void testWithRouting() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index eeef5403fe898..805e82dc9850b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -66,12 +66,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSourceDefaultBehavior() { + public void testSourceDefaultBehavior() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field", "value"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse 
response = client().prepareSearch("test").get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); @@ -84,12 +85,13 @@ public void testSourceDefaultBehavior() { } - public void testSourceFiltering() { + public void testSourceFiltering() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); @@ -117,12 +119,13 @@ public void testSourceFiltering() { * Test Case for #5132: Source filtering with wildcards broken when given multiple patterns * https://github.com/elastic/elasticsearch/issues/5132 */ - public void testSourceWithWildcardFiltering() { + public void testSourceWithWildcardFiltering() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null).get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 30dba87f8ef5d..b342e6d35f0b4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -79,6 +79,7 @@ import java.util.Set; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -1171,6 +1172,7 @@ public void testSkipDuplicates() throws Exception { createIndexAndMapping(mapping); int numDocs = randomIntBetween(10, 100); int numUnique = randomIntBetween(1, numDocs); + logger.info("Suggestion duplicate parameters: numDocs {} numUnique {}", numDocs, numUnique); List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); int[] weights = new int[numUnique]; Integer[] termIds = new Integer[numUnique]; @@ -1180,8 +1182,10 @@ public void testSkipDuplicates() throws Exception { int weight = randomIntBetween(0, 100); weights[id] = Math.max(weight, weights[id]); String suggestion = "suggestion-" + String.format(Locale.ENGLISH, "%03d", id); + logger.info("Creating {}, i {}, id {}, weight {}", suggestion, i, id, weight); indexRequestBuilders.add( client().prepareIndex(INDEX) + .setRefreshPolicy(WAIT_UNTIL) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1195,10 +1199,12 @@ public void testSkipDuplicates() throws Exception { indexRandom(true, indexRequestBuilders); Arrays.sort(termIds, Comparator.comparingInt(o -> weights[(int) o]).reversed().thenComparingInt(a -> (int) a)); + logger.info("Expected terms id ordered {}", (Object[]) termIds); String[] expected = new String[numUnique]; for (int i = 0; i < termIds.length; i++) { expected[i] = "suggestion-" +
String.format(Locale.ENGLISH, "%03d", termIds[i]); } + logger.info("Expected suggestions field values {}", (Object[]) expected); CompletionSuggestionBuilder completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD) .prefix("sugg") .skipDuplicates(true) @@ -1207,6 +1213,7 @@ public void testSkipDuplicates() throws Exception { SearchResponse searchResponse = client().prepareSearch(INDEX) .suggest(new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)) .get(); + logger.info("Search Response with Suggestions {}", searchResponse); assertSuggestions(searchResponse, true, "suggestions", expected); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index 32bb0e34054bb..bc6e49008bf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -287,6 +287,7 @@ public void testSizeOneShard() throws Exception { index("test", "type1", Integer.toString(i), "text", "abc" + i); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); @@ -369,6 +370,7 @@ public void testSimple() throws Exception { index("test", "type1", "3", "text", "abbd"); index("test", "type1", "4", "text", "abcc"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); diff --git a/server/src/main/java/org/opensearch/OpenSearchGenerationException.java b/server/src/main/java/org/opensearch/OpenSearchGenerationException.java index 50b021d41123f..51093390d57e0 100644 --- a/server/src/main/java/org/opensearch/OpenSearchGenerationException.java +++ b/server/src/main/java/org/opensearch/OpenSearchGenerationException.java @@ -32,6 +32,7 @@ package org.opensearch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -39,8 +40,9 @@ /** * A generic exception indicating failure to generate. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchGenerationException extends OpenSearchException { public OpenSearchGenerationException(String msg) { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java index ffba4d2eb50c0..c30da41066dbd 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java @@ -33,10 +33,12 @@ package org.opensearch.action; import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.PublicApi; /** * Base exception for an action request validation extendable by plugins * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ActionRequestValidationException extends ValidationException {} diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 3632ba2d7304f..8fe6f49c08af0 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -32,14 +32,17 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; + /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to * one or more indices and one or more aliases. Meant to be used for aliases management requests (e.g. add/remove alias, * get aliases) that hold aliases and indices in separate fields. * Allows to retrieve which indices and aliases the action relates to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AliasesRequest extends IndicesRequest.Replaceable { /** diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index 31f61e76c74ff..e09c76430defc 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,8 +56,9 @@ * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.opensearch.action.bulk.BulkRequest} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DocWriteRequest<T> extends IndicesRequest, Accountable { // Flag set for disallowing index auto creation for an individual write request. @@ -169,7 +171,10 @@ public interface DocWriteRequest<T> extends IndicesRequest, Accountable { /** * Requested operation type to perform on the document + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum OpType { /** * Index the source.
If there is an existing document with the id, it will diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index e3052b3b80035..aada56ed93fd3 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -64,8 +65,9 @@ /** * A base class for the response of a write operation that involves a single doc * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject { private static final String _SHARDS = "_shards"; @@ -80,7 +82,10 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr /** * An enum that represents the results of CRUD operations, primarily used to communicate the type of * operation that occurred. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Result implements Writeable { CREATED(0), UPDATED(1), diff --git a/server/src/main/java/org/opensearch/action/IndicesRequest.java b/server/src/main/java/org/opensearch/action/IndicesRequest.java index 7e4c2f5076cda..f42818595d3cc 100644 --- a/server/src/main/java/org/opensearch/action/IndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/IndicesRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to @@ -40,8 +41,9 @@ * In case of internal requests originated during the distributed execution of an external request, * they will still return the indices that the original request related to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndicesRequest { /** diff --git a/server/src/main/java/org/opensearch/action/OriginalIndices.java b/server/src/main/java/org/opensearch/action/OriginalIndices.java index 1e24c64bc60fc..1c26bf9d416f5 100644 --- a/server/src/main/java/org/opensearch/action/OriginalIndices.java +++ b/server/src/main/java/org/opensearch/action/OriginalIndices.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Used to keep track of original indices within internal (e.g.
shard level) requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class OriginalIndices implements IndicesRequest { // constant to use when original indices are not applicable and will not be serialized across the wire diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 3dec781f0acf4..4a5c3b076faba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -35,6 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * A request to explain the allocation of a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainRequest extends ClusterManagerNodeRequest { private static final ObjectParser PARSER = new ObjectParser<>("cluster/allocation/explain"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index d85cb3929873d..31781dda04957 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for requests to explain the allocation of a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterAllocationExplainRequest, ClusterAllocationExplainResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 0eeedb9af0ab7..17afdd862cf66 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.allocation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Explanation response for a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainResponse extends 
ActionResponse { private ClusterAllocationExplanation cae; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index f4705a21f5014..3c8f07613561d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -58,8 +59,9 @@ * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java index bd783b349bed4..6e4d7b84f204e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.crypto; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.io.stream.StreamInput; @@ -28,8 +29,9 @@ /** * Crypto settings supplied during a put repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CryptoSettings implements Writeable, ToXContentObject { private String keyProviderName; private String keyProviderType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java index 79b7381801da6..8243be21ba487 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,8 +19,9 @@ /** * Request for deleting decommission request. 
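ClusterAllocationExplanation and CryptoSettings above both implement Writeable, the transport-serialization contract most of these newly public types share: a StreamInput constructor that reads fields in exactly the order writeTo(StreamOutput) writes them. A minimal sketch of that contract (class name hypothetical, field name borrowed from CryptoSettings):

    import java.io.IOException;
    import org.opensearch.core.common.io.stream.StreamInput;
    import org.opensearch.core.common.io.stream.StreamOutput;
    import org.opensearch.core.common.io.stream.Writeable;

    // Hypothetical minimal Writeable; the real classes here also extend transport base types.
    public class KeyProviderRef implements Writeable {
        private final String keyProviderName;

        public KeyProviderRef(StreamInput in) throws IOException {
            this.keyProviderName = in.readString(); // must mirror writeTo's field order
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(keyProviderName);
        }
    }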
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest { public DeleteDecommissionStateRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java index 08f194c53f18e..94075d6ec860d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for Delete decommission request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteDecommissionStateRequest, DeleteDecommissionStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java index 3d0404c25373b..13b056a019200 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.decommission.awareness.delete; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Response returned after deletion of decommission request. 
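The DeleteDecommissionStateResponse annotated here extends AcknowledgedResponse (as does DecommissionResponse further down), so its entire payload is a single acknowledged flag; a hedged caller-side sketch:

    import org.opensearch.action.support.master.AcknowledgedResponse;

    // Hypothetical helper; ack-style responses expose only isAcknowledged().
    final class AckChecks {
        private AckChecks() {}

        static void requireAcknowledged(AcknowledgedResponse response) {
            if (response.isAcknowledged() == false) {
                throw new IllegalStateException("cluster manager did not acknowledge the request");
            }
        }
    }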
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateResponse extends AcknowledgedResponse { public DeleteDecommissionStateResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java index 15c7e165fb62f..3ecbbfbbc7285 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -21,8 +22,9 @@ /** * Get Decommissioned attribute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest { private String attributeName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java index e766e9c674ff7..13eb375f0d00e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Get decommission request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetDecommissionStateRequest, GetDecommissionStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java index bbcbd7013c299..9010c0e7d9388 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -10,6 +10,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -26,8 +27,9 @@ /** * Response for decommission status * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateResponse extends ActionResponse implements 
ToXContentObject { private String attributeValue; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java index 18f14db3c2b8d..9070aa5a0dc55 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -23,8 +24,9 @@ /** * Registers a decommission request with decommission attribute and timeout * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionRequest extends ClusterManagerNodeRequest { public static final TimeValue DEFAULT_NODE_DRAINING_TIMEOUT = TimeValue.timeValueSeconds(120); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java index c3591fff54885..e965110cdb9df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java @@ -12,13 +12,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Register decommission request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DecommissionRequest, DecommissionResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java index 13c1775b005b3..a2401cdf91b07 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.decommission.awareness.put; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -18,8 +19,9 @@ /** * Response for decommission request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionResponse extends AcknowledgedResponse implements ToXContentObject { public 
DecommissionResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index a9532077f9129..ec8b01d853da6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -40,6 +40,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,8 +54,9 @@ /** * Transport request for requesting cluster health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; @@ -351,8 +353,9 @@ public ActionRequestValidationException validate() { /** * The level of the health request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Level { CLUSTER, INDICES, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index cca9d35d8aa6f..a9a3756755265 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -38,13 +38,15 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder for requesting cluster health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterHealthRequest, ClusterHealthResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index fb68012502116..1a27f161343e8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.health.ClusterIndexHealth; import org.opensearch.cluster.health.ClusterStateHealth; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; @@ -67,8 +68,9 @@ /** * Transport response for Cluster Health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthResponse extends ActionResponse implements StatusToXContentObject { private static final String CLUSTER_NAME = "cluster_name"; private static final 
String STATUS = "status"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 39e912a995f35..9e52b90f7bd38 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.hotthreads; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Transport request for OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsRequest extends BaseNodesRequest { int threads = 3; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index 3639439dd3fb8..51b455b41115b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder class for requesting OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< NodesHotThreadsRequest, NodesHotThreadsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index 5af9ce50a4bfe..eeddd2deb7ff8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Transport response for OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsResponse extends BaseNodesResponse { public NodesHotThreadsResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index 31cacda7c3487..e694a5e102e02 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.info; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * A request to get node (cluster) level information. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoRequest extends BaseNodesRequest { private Set requestedMetrics = Metric.allMetrics(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index 76ef75b77a1cf..4c3191b017948 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport action for OpenSearch Node Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder { public NodesInfoRequestBuilder(OpenSearchClient client, NodesInfoAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java index 5b4444053a8b7..7ddd70185e8ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; @@ -59,8 +60,9 @@ /** * Transport response for OpenSearch Node Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoResponse extends BaseNodesResponse implements ToXContentFragment { public NodesInfoResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index 2f2162947aeea..d970e4c9e5468 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.settings.SecureString; /** * Builder for the reload secure settings nodes request * - * 
@opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder< NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index dd36b3b8db3ab..874713b51d627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -46,6 +46,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.http.HttpStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -56,6 +57,8 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -127,6 +130,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchBackpressureStats searchBackpressureStats; + @Nullable + private SegmentReplicationRejectionStats segmentReplicationRejectionStats; + @Nullable private ClusterManagerThrottlingStats clusterManagerThrottlingStats; @@ -142,6 +148,12 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchPipelineStats searchPipelineStats; + @Nullable + private NodesResourceUsageStats resourceUsageStats; + + @Nullable + private RepositoriesStats repositoriesStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -198,6 +210,21 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); + } else { + resourceUsageStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + segmentReplicationRejectionStats = in.readOptionalWriteable(SegmentReplicationRejectionStats::new); + } else { + segmentReplicationRejectionStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); + } else { + repositoriesStats = null; + } } public NodeStats( @@ -216,6 +243,7 @@ public NodeStats( @Nullable DiscoveryStats discoveryStats, @Nullable IngestStats ingestStats, @Nullable AdaptiveSelectionStats adaptiveSelectionStats, + @Nullable NodesResourceUsageStats resourceUsageStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, @Nullable ShardIndexingPressureStats shardIndexingPressureStats, @@ -224,7 +252,9 @@ public NodeStats( @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, - @Nullable SearchPipelineStats searchPipelineStats + @Nullable SearchPipelineStats 
searchPipelineStats, + @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, + @Nullable RepositoriesStats repositoriesStats ) { super(node); this.timestamp = timestamp; @@ -241,6 +271,7 @@ public NodeStats( this.discoveryStats = discoveryStats; this.ingestStats = ingestStats; this.adaptiveSelectionStats = adaptiveSelectionStats; + this.resourceUsageStats = resourceUsageStats; this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.shardIndexingPressureStats = shardIndexingPressureStats; @@ -250,6 +281,8 @@ public NodeStats( this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; this.searchPipelineStats = searchPipelineStats; + this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; + this.repositoriesStats = repositoriesStats; } public long getTimestamp() { @@ -344,6 +377,11 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } + @Nullable + public NodesResourceUsageStats getResourceUsageStats() { + return resourceUsageStats; + } + @Nullable public ScriptCacheStats getScriptCacheStats() { return scriptCacheStats; @@ -387,6 +425,16 @@ public SearchPipelineStats getSearchPipelineStats() { return searchPipelineStats; } + @Nullable + public SegmentReplicationRejectionStats getSegmentReplicationRejectionStats() { + return segmentReplicationRejectionStats; + } + + @Nullable + public RepositoriesStats getRepositoriesStats() { + return repositoriesStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -430,6 +478,15 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(resourceUsageStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(segmentReplicationRejectionStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(repositoriesStats); + } } @Override @@ -520,7 +577,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getSearchPipelineStats() != null) { getSearchPipelineStats().toXContent(builder, params); } + if (getResourceUsageStats() != null) { + getResourceUsageStats().toXContent(builder, params); + } + if (getSegmentReplicationRejectionStats() != null) { + getSegmentReplicationRejectionStats().toXContent(builder, params); + } + if (getRepositoriesStats() != null) { + getRepositoriesStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index b0caa469033eb..22a667e7e8f6f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * A request to get node (cluster) level stats. 
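The NodeStats hunks above add three optional sections (resource usage, segment replication rejections, repositories) and gate both read and write on Version.V_2_12_0. This is the standard idiom for wire compatibility in mixed-version clusters: new fields are exchanged only when the remote node is on a version that knows about them. A condensed sketch of the idiom (class name hypothetical, calls mirrored from the hunks above):

    import java.io.IOException;
    import org.opensearch.Version;
    import org.opensearch.common.Nullable;
    import org.opensearch.core.common.io.stream.StreamInput;
    import org.opensearch.core.common.io.stream.StreamOutput;
    import org.opensearch.core.common.io.stream.Writeable;
    import org.opensearch.repositories.RepositoriesStats;

    // Hypothetical condensed stats holder showing the version-gated idiom.
    public class StatsExample implements Writeable {
        @Nullable
        private final RepositoriesStats repositoriesStats; // section added in 2.12, hence the gate

        public StatsExample(StreamInput in) throws IOException {
            if (in.getVersion().onOrAfter(Version.V_2_12_0)) {
                repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new);
            } else {
                repositoriesStats = null; // an older peer never wrote this section
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            if (out.getVersion().onOrAfter(Version.V_2_12_0)) {
                out.writeOptionalWriteable(repositoriesStats); // skipped for peers below 2.12
            }
        }
    }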
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsRequest extends BaseNodesRequest { private CommonStatsFlags indices = new CommonStatsFlags(); @@ -213,7 +215,10 @@ public enum Metric { WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), TASK_CANCELLATION("task_cancellation"), - SEARCH_PIPELINE("search_pipeline"); + SEARCH_PIPELINE("search_pipeline"), + RESOURCE_USAGE_STATS("resource_usage_stats"), + SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), + REPOSITORIES("repositories"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index e382278f5ddb8..58149e9a34a34 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for obtaining OpenSearch Node Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder< NodesStatsRequest, NodesStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 8fbc52ad8a3f8..73a938568acc3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport response for obtaining OpenSearch Node Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsResponse extends BaseNodesResponse implements ToXContentFragment { public NodesStatsResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 615abbaef845d..99cf42cfdc4d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -124,7 +124,10 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), - NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) + 
NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), + NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index 360765e8f4803..183fb2a236148 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; import org.opensearch.action.support.tasks.BaseTasksRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.tasks.CancellableTask; @@ -44,8 +45,9 @@ /** * A request to cancel tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksRequest extends BaseTasksRequest { public static final String DEFAULT_REASON = "by user request"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index ee19e8b104603..ac02dfdf7381f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.tasks.TasksRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for the request to cancel tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksRequestBuilder extends TasksRequestBuilder { public CancelTasksRequestBuilder(OpenSearchClient client, CancelTasksAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 979489999cc6e..1f86c7c22c2eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -49,8 +50,9 @@ /** * Returns the list of tasks that were cancelled * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksResponse extends ListTasksResponse { private static final ConstructingObjectParser PARSER = setupParser( diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java index 82902c55b1b0a..13c6d645d4c3a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * A request to get node tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskRequest extends ActionRequest { private TaskId taskId = TaskId.EMPTY_TASK_ID; private boolean waitForCompletion = false; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java index dd429da82ebf8..ea42e1770e7f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; /** * Builder for the request to retrieve the list of tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskRequestBuilder extends ActionRequestBuilder { public GetTaskRequestBuilder(OpenSearchClient client, GetTaskAction action) { super(client, action, new GetTaskRequest()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java index 9d4e6da8c7e62..80901373e14d5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.node.tasks.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -48,8 +49,9 @@ /** * Returns the list of tasks currently running on the nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskResponse extends ActionResponse implements ToXContentObject { private final TaskResult task; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 070b93c788ef0..6ee56b0da7884 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.tasks.list; import org.opensearch.action.support.tasks.BaseTasksRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * A request to get node tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksRequest extends BaseTasksRequest { private boolean detailed = false; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java index 45beb0dd899b5..a195b98d06e76 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.tasks.TasksRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for the request to retrieve the list of tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksRequestBuilder extends TasksRequestBuilder { public ListTasksRequestBuilder(OpenSearchClient client, ListTasksAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 693566391fb25..337151a2a9268 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -64,8 +65,9 @@ /** * Returns the list of tasks currently running on the nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { private static final String TASKS = "tasks"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java index 0ca114ae0ed5c..fb23a41b8dc19 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.node.tasks.list; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.TaskInfo; @@ -45,8 +46,9 @@ /** * Information about a currently running task and all its subtasks. 
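With the tasks request builders above promoted to @PublicApi, downstream code can lean on the fluent client path. A hedged sketch assuming an injected Client; the builder methods mirror fields shown in these hunks, but treat the exact signatures as assumptions:

    import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
    import org.opensearch.client.Client;

    // Hypothetical helper; "detailed" corresponds to ListTasksRequest's detailed field above.
    final class TaskListing {
        private TaskListing() {}

        static ListTasksResponse listDetailedTasks(Client client) {
            return client.admin()
                .cluster()
                .prepareListTasks()  // ListTasksRequestBuilder, now @PublicApi
                .setDetailed(true)
                .get();
        }
    }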
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TaskGroup implements ToXContentObject { private final TaskInfo task; @@ -65,8 +67,9 @@ public static Builder builder(TaskInfo taskInfo) { /** * Builder for the Task Group * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private TaskInfo taskInfo; private List childTasks; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java index e31b88ace953f..385f48d5690c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.usage; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Transport request for collecting OpenSearch telemetry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageRequest extends BaseNodesRequest { private boolean restActions; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java index 7d1823b59dc04..ec1176ac634fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for collecting OpenSearch telemetry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< NodesUsageRequest, NodesUsageResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java index 575d88c10317c..7dc8a318b2cf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ * The response for the nodes usage api which contains the individual usage * statistics for all nodes queried. 
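NodesUsageResponse above renders itself through ToXContentFragment, and most response types in this patch implement either it or ToXContentObject to serialize into REST responses. A minimal hypothetical ToXContentObject, with made-up field and key names:

    import java.io.IOException;
    import org.opensearch.core.xcontent.ToXContentObject;
    import org.opensearch.core.xcontent.XContentBuilder;

    // Hypothetical object-style response fragment; real responses emit many more fields.
    public class UsageSummary implements ToXContentObject {
        private final long restActionCalls;

        public UsageSummary(long restActionCalls) {
            this.restActionCalls = restActionCalls;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field("rest_action_calls", restActionCalls); // key name is illustrative
            builder.endObject();
            return builder;
        }
    }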
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageResponse extends BaseNodesResponse implements ToXContentFragment { public NodesUsageResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java index 3b090415c175b..afa2058d1deba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -30,8 +31,9 @@ /** * Restore remote store request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class RestoreRemoteStoreRequest extends ClusterManagerNodeRequest implements ToXContentObject { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java index 66908889b0641..3de926c256f46 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.restore; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -28,8 +29,9 @@ /** * Contains information about remote store restores * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public final class RestoreRemoteStoreResponse extends ActionResponse implements ToXContentObject { @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java index f292fcec7ccac..7bddd0deb373b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -22,8 +23,9 @@ /** * Encapsulates all remote store stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStats implements Writeable, ToXContentFragment { /** * 
Stats related to Remote Segment Store operations diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java index f09cf79c5154c..12c316adc75cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Encapsulates all remote store stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsRequest extends BroadcastRequest { private String[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java index c31e4a1fd6178..4da700d3dc51b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java @@ -10,13 +10,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder for RemoteStoreStatsRequest * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsRequestBuilder extends BroadcastOperationRequestBuilder< RemoteStoreStatsRequest, RemoteStoreStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java index 63a4c97d695b7..cad57d148770b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -25,8 +26,9 @@ /** * Remote Store stats response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsResponse extends BroadcastResponse { private final RemoteStoreStats[] remoteStoreStats; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index 7e3f4cd95fc72..3e408c6114690 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -33,6 +33,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Transport request for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CleanupRepositoryRequest extends AcknowledgedRequest { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java index 95c4fb372572f..34e42b157e627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CleanupRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CleanupRepositoryRequest, CleanupRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java index 99af862c769e8..e6790e8cbe708 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -31,6 +31,7 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -46,8 +47,9 @@ /** * Transport response for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 352a3772e039b..04fdf22bee593 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import 
org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ *
* <p>
* The unregister repository command just unregisters the repository. No data is getting deleted from the repository. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRepositoryRequest extends AcknowledgedRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index ffef8d5b41979..6f5d0495e1c9f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for unregister repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< DeleteRepositoryRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 6fb086f65497e..80a86f1b79209 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Get repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesRequest extends ClusterManagerNodeReadRequest { private String[] repositories = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 4b93aff4c25bc..b0c18f952b3df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Get repository request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetRepositoriesRequest, GetRepositoriesResponse, diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 098a0e60142e7..f467b240aac31 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,16 +43,17 @@ import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** * Get repositories response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesResponse extends ActionResponse implements ToXContentObject { private RepositoriesMetadata repositories; @@ -83,7 +85,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); repositories.toXContent( builder, - new DelegatingMapParams(Collections.singletonMap(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params) + new DelegatingMapParams( + Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true", RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"), + params + ) ); builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 582f73f335b49..3fbd5743b5c8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -57,8 +58,9 @@ * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. 
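With PutRepositoryRequest and its builder now annotated as public API, a short consumer sketch may be useful. It assumes the node-level Client interface; the repository name, type, and location are illustrative only:

```java
import org.opensearch.action.support.master.AcknowledgedResponse;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public final class RegisterRepositoryExample {
    // Registers a snapshot repository, replacing any existing registration with the same name.
    static AcknowledgedResponse register(Client client) {
        return client.admin()
            .cluster()
            .preparePutRepository("my_backup")                      // PutRepositoryRequestBuilder
            .setType("fs")                                          // shared-filesystem repository type
            .setSettings(Settings.builder().put("location", "/mnt/backups"))
            .get();
    }
}
```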
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutRepositoryRequest extends AcknowledgedRequest implements ToXContentObject { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index cf649ee6b4cbf..22aa6d7dc7c00 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Register repository request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< PutRepositoryRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index b84161e716f5d..ae6c92d8625ca 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Verify repository request. 
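A companion sketch for the verify request promoted in this hunk; as above, the Client wiring and the repository name are assumptions for illustration:

```java
import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.opensearch.client.Client;

public final class VerifyRepositoryExample {
    // Asks the nodes to confirm they can actually read and write the registered repository.
    static VerifyRepositoryResponse verify(Client client) {
        return client.admin()
            .cluster()
            .prepareVerifyRepository("my_backup")                   // VerifyRepositoryRequestBuilder
            .get();
    }
}
```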
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryRequest extends AcknowledgedRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index c405fb9bc12cd..023f223700775 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for verify repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< VerifyRepositoryRequest, VerifyRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index d7af3478bdac3..12d6a4cca5683 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.verify; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -54,8 +55,9 @@ /** * Verify repository response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { static final String NODES = "nodes"; @@ -64,8 +66,9 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte /** * Inner Node View * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NodeView implements Writeable, ToXContentObject { private static final ObjectParser.NamedObjectParser PARSER; static { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index a6addce14787d..1cefaa4866110 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Request to submit cluster reroute allocation commands * - * @opensearch.internal + * @opensearch.api */ 
+@PublicApi(since = "1.0.0") public class ClusterRerouteRequest extends AcknowledgedRequest { private AllocationCommands commands = new AllocationCommands(); private boolean dryRun; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 01d52cb43320d..fc8310bdf7852 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; +import org.opensearch.common.annotation.PublicApi; /** * Builder for a cluster reroute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder< ClusterRerouteRequest, ClusterRerouteResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index a62029218ca25..ff01888040e82 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.RoutingExplanations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -46,8 +47,9 @@ /** * Response returned after a cluster reroute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXContentObject { private final ClusterState state; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 6b62a5edd8c28..77aee99c2e902 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ /** * Request for an update cluster settings action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsRequest extends AcknowledgedRequest implements ToXContentObject { private static final ParseField PERSISTENT = new ParseField("persistent"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 4d08c94f78b6a..53f1f17bbeb50 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -42,8 +43,9 @@ /** * Builder for a cluster update settings request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 1c543260f7aee..2dfdb49736773 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,9 @@ /** * A response for a cluster update settings action. 
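To make the now-public settings-update surface concrete, a minimal sketch; the specific setting and value are illustrative only:

```java
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public final class UpdateClusterSettingsExample {
    // Applies a persistent setting (survives full cluster restarts, unlike transient settings).
    static ClusterUpdateSettingsResponse enableAllocation(Client client) {
        return client.admin()
            .cluster()
            .prepareUpdateSettings()                                // ClusterUpdateSettingsRequestBuilder
            .setPersistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "all"))
            .get();
    }
}
```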
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { private static final ParseField PERSISTENT = new ParseField("persistent"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 84e9554932864..eaafca21e1894 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.shards; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Transport action for searching shard groups * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsGroup implements Writeable, ToXContentObject { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 7ddb945ad911e..62e05ebb37e28 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport request for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 674a2c2c36221..c4f8a29bbcf3d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterSearchShardsRequest, ClusterSearchShardsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 2b24d870219bb..8ab6455bca4a7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.shards; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport response for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject { private final ClusterSearchShardsGroup[] groups; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java index 7f69fe9fe3c72..72de28ca7e699 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java @@ -15,6 +15,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -32,8 +33,9 @@ /** * Request to delete weights for weighted round-robin shard routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingRequest extends ClusterManagerNodeRequest { private static final Logger logger = LogManager.getLogger(ClusterDeleteWeightedRoutingRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java index bb34fea589534..e0d4e0ad2abed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to delete weights for weighted round-robin shard routing policy. 
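A sketch of how the delete request might be driven. The no-arg request constructor and the generic execute(action, request) path are assumptions here; a dedicated prepare* helper, if your client version exposes one, would be equivalent:

```java
import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingAction;
import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse;
import org.opensearch.client.Client;

public final class DeleteWeightedRoutingExample {
    // Removes any stored weighted round-robin weights, restoring default shard routing.
    static ClusterDeleteWeightedRoutingResponse clearWeights(Client client) {
        ClusterDeleteWeightedRoutingRequest request = new ClusterDeleteWeightedRoutingRequest(); // assumed no-arg ctor
        return client.admin()
            .cluster()
            .execute(ClusterDeleteWeightedRoutingAction.INSTANCE, request)
            .actionGet();
    }
}
```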
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterDeleteWeightedRoutingRequest, ClusterDeleteWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java index 2a417e9f4287f..f6a18ae5055ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Response from deleting weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingResponse extends AcknowledgedResponse { ClusterDeleteWeightedRoutingResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java index 7dcec15c750fc..937829de1db00 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,8 +21,9 @@ /** * Request to get weights for weighted round-robin search routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingRequest extends ClusterManagerNodeReadRequest { String awarenessAttribute; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java index 82f4c1106461d..3cb5e7d3d07b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to get weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterGetWeightedRoutingRequest, ClusterGetWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java index 5ef14d0e00c49..b109ecb7de5d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java @@ -11,6 +11,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -28,8 +29,9 @@ /** * Response from fetching weights for weighted round-robin search routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingResponse extends ActionResponse implements ToXContentObject { private static final String WEIGHTS = "weights"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java index 8e8432a384aa5..c310e28610184 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java @@ -16,6 +16,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -37,8 +38,9 @@ /** * Request to update weights for weighted round-robin shard routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingRequest extends ClusterManagerNodeRequest { private static final Logger logger = LogManager.getLogger(ClusterPutWeightedRoutingRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java index adfb2cf02f6d9..c520b0efd9838 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java @@ -11,12 +11,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to update weights for weighted round-robin shard routing policy. 
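The put-side payload is a WeightedRouting value keyed by awareness-attribute values. Assuming its (attribute, weights) constructor, construction looks roughly like this, with the attribute and zone names being hypothetical:

```java
import org.opensearch.cluster.routing.WeightedRouting;

import java.util.Map;

public final class WeightedRoutingExample {
    // A weight of 0 drains search traffic away from a zone; equal non-zero weights balance it.
    static WeightedRouting zoneWeights() {
        return new WeightedRouting("zone", Map.of("zone-a", 1.0, "zone-b", 1.0, "zone-c", 0.0));
    }
}
```

The delete request shown earlier resets exactly this state.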
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterPutWeightedRoutingRequest, ClusterPutWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java index cbf10aa74f8a2..4fee2f05a8715 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.put; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Response from updating weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingResponse extends AcknowledgedResponse { public ClusterPutWeightedRoutingResponse(boolean acknowledged) { super(acknowledged); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 694b13e37bb03..4a05911610137 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Transport request for cloning a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloneSnapshotRequest extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 0f47d77d6a9d3..839a1b935ad1f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -37,13 +37,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; /** * Transport request builder for cloning a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class 
CloneSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 0cfc24994768a..0ad3071a99045 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -38,6 +38,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.Strings; @@ -76,8 +77,9 @@ *
* <li>must not contain invalid file name characters {@link Strings#INVALID_FILENAME_CHARS}</li>
* </ul> * * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotRequest extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 40d440419819c..c378c416cc973 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -43,8 +44,9 @@ /** * Create snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CreateSnapshotRequest, CreateSnapshotResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index 1517112b9b3e1..5d25cf6cacfab 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.create; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -51,8 +52,9 @@ /** * Create snapshot response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index d08c3033e7e10..21280381610f7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ * Delete snapshot request removes snapshots from the repository and cleans up all files that are associated with the snapshots. * All files that are shared with at least one other existing snapshot are left intact.
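Since both the create and delete snapshot surfaces are promoted in these hunks, one round-trip sketch covers them; the repository and snapshot names are illustrative:

```java
import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.opensearch.action.support.master.AcknowledgedResponse;
import org.opensearch.client.Client;

public final class SnapshotRoundTripExample {
    // Takes a snapshot synchronously, then deletes it again.
    static boolean roundTrip(Client client) {
        CreateSnapshotResponse created = client.admin()
            .cluster()
            .prepareCreateSnapshot("my_backup", "snapshot_1")       // CreateSnapshotRequestBuilder
            .setWaitForCompletion(true)                             // block until the snapshot finishes
            .get();

        AcknowledgedResponse deleted = client.admin()
            .cluster()
            .prepareDeleteSnapshot("my_backup", "snapshot_1")       // DeleteSnapshotRequestBuilder
            .get();

        // Snapshot info is populated because we waited for completion above.
        return created.getSnapshotInfo() != null && deleted.isAcknowledged();
    }
}
```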
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteSnapshotRequest extends ClusterManagerNodeRequest { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 1fa855265ffd4..f4da1ec0f7785 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Delete snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 8e2e2d3390058..7e95885c60f93 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Get snapshot request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsRequest extends ClusterManagerNodeRequest { public static final String ALL_SNAPSHOTS = "_all"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 3434f1cb47a99..983325aa575d7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Get snapshots request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< GetSnapshotsRequest, GetSnapshotsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 5d5de4c7fa85e..6b0e8ba8a372f 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -53,8 +54,9 @@ /** * Get snapshots response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsResponse extends ActionResponse implements ToXContentObject { @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index c688b13ed9fdd..492ef86bb7843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -63,15 +64,19 @@ /** * Restore snapshot request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotRequest extends ClusterManagerNodeRequest implements ToXContentObject { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestoreSnapshotRequest.class); /** * Enumeration of possible storage types + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum StorageType { LOCAL("local"), REMOTE_SNAPSHOT("remote_snapshot"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index d9cca536d1c41..39eaadf3c8de6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Restore snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< RestoreSnapshotRequest, RestoreSnapshotResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index f6e50c69da3dc..c94645a6deb8f 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.restore; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -53,8 +54,9 @@ /** * Contains information about restores snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotResponse extends ActionResponse implements ToXContentObject { @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index 6e250962d1210..cd7f4b392c61d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -32,11 +32,14 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; + /** * Stage for snapshotting an Index Shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SnapshotIndexShardStage { /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index fc3ffd4977da5..f991e90cb0728 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.broadcast.BroadcastShardResponse; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -56,8 +57,9 @@ /** * Status for snapshotting an Index Shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotIndexShardStatus extends BroadcastShardResponse implements ToXContentFragment { private SnapshotIndexShardStage stage = SnapshotIndexShardStage.INIT; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index 9c2db62c33bd0..402fee76bc663 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; @@ -54,8 +55,9 @@ /** * Represents snapshot 
status of all shards in the index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotIndexStatus implements Iterable<SnapshotIndexShardStatus>, ToXContentFragment { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index ad514a13312ba..bfd0c23c579bc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContent; @@ -47,8 +48,9 @@ /** * Status of snapshot shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotShardsStats implements ToXContentObject { private int initializingShards; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java index babdb7540a314..f287e94edd0dc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -50,8 +51,9 @@ /** * Stats for snapshots * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotStats implements Writeable, ToXContentObject { private long startTime; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 0b101070db478..e0f380b3ebbe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -68,8 +69,9 @@ /** * Status of a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotStatus implements ToXContentObject, Writeable { private final Snapshot snapshot;
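With the snapshot-status surface now marked public, a short usage sketch may help. This assumes the in-process Client admin DSL; the repository and snapshot names ("my-repo", "nightly-snap") are illustrative, not part of this change:

    import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus;
    import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
    import org.opensearch.client.Client;

    public final class SnapshotStatusExample {
        // Ask one repository for one snapshot's status and print shard progress.
        static void printShardProgress(Client client) {
            SnapshotsStatusResponse response = client.admin()
                .cluster()
                .prepareSnapshotStatus("my-repo")
                .setSnapshots("nightly-snap")
                .get();
            for (SnapshotStatus status : response.getSnapshots()) {
                System.out.println(status.getSnapshot() + ": " + status.getShardsStats().getDoneShards() + " shards done");
            }
        }
    }

diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index 22fca3c54c604..061e73f1094b5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++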
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Get snapshot status request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotsStatusRequest extends ClusterManagerNodeRequest { private String repository = "_all"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 55f156d4a470e..9377eca60e353 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Snapshots status request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotsStatusRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< SnapshotsStatusRequest, SnapshotsStatusResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 2f0e5e4b4686f..df436a587c2ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -51,8 +52,9 @@ /** * Snapshot status response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotsStatusResponse extends ActionResponse implements ToXContentObject { private final List snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java index 7c937a1700db0..90a52f7406d57 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import 
org.opensearch.core.common.io.stream.StreamInput; @@ -46,8 +47,9 @@ /** * Transport request for obtaining cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index b9bfeca9f7386..01a49c15fc1ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -35,13 +35,15 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Transport request builder for obtaining cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterStateRequest, ClusterStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index b947ad34a8cc2..d09105b2bd0a0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * The response for getting the cluster state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateResponse extends ActionResponse { private ClusterName clusterName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java index 3372448bdacae..b7054ae99361b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -61,8 +62,9 @@ /** * Statistics about analysis usage. 
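The three cluster-state types just annotated (request, builder, response) are usually exercised together through the client admin DSL. A hedged sketch, assuming an existing org.opensearch.client.Client named 'client':

    import org.opensearch.client.Client;
    import org.opensearch.cluster.ClusterState;

    public final class ClusterStateExample {
        // Clear the default flags, then ask for metadata only to keep the
        // response small; ClusterStateRequestBuilder handles the wiring.
        static ClusterState fetchMetadataOnly(Client client) {
            return client.admin()
                .cluster()
                .prepareState()
                .clear()
                .setMetadata(true)
                .get()
                .getState();
        }
    }
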
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class AnalysisStats implements ToXContentFragment, Writeable { /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java index 63ac76ae65783..26e554f44fca1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.stats; import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.cache.query.QueryCacheStats; @@ -50,8 +51,9 @@ /** * Cluster Stats per index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStatsIndices implements ToXContentFragment { private int indexCount; @@ -180,8 +182,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Inner Shard Stats * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardStats implements ToXContentFragment { int indices; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index 5689596763cef..b44e9cfc5c74a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -38,6 +38,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; @@ -71,8 +72,9 @@ /** * Per Node Cluster Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStatsNodes implements ToXContentFragment { private final Counts counts; @@ -214,8 +216,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Inner Counts * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Counts implements ToXContentFragment { static final String COORDINATING_ONLY = "coordinating_only"; @@ -282,8 +285,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Inner Operating System Stats * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class OsStats implements ToXContentFragment { final int availableProcessors; final int allocatedProcessors; @@ -395,8 +399,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Inner Process Stats * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ProcessStats implements ToXContentFragment { final int count; @@ -498,8 +503,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Inner JVM Stats * - * @opensearch.internal + * 
@opensearch.api */ + @PublicApi(since = "1.0.0") public static class JvmStats implements ToXContentFragment { private final Map versions; @@ -626,8 +632,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Inner JVM Version * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class JvmVersion { String version; String vmName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java index dc472c10f550b..6a99451c596ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.stats; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * A request to get cluster level stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStatsRequest extends BaseNodesRequest { public ClusterStatsRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java index aaf5e3aeffeb8..0dcb03dc26d0e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for obtaining cluster stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder< ClusterStatsRequest, ClusterStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index 1b609b1f7556d..cc002b689a2a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Transport response for obtaining cluster stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStatsResponse extends BaseNodesResponse implements ToXContentFragment { final ClusterStatsNodes nodesStats; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java index f833c52493e00..f73c363b2ea60 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Statistics about an index feature. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexFeatureStats implements ToXContent, Writeable { final String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java index 64218765a57e6..8e6fdb02b1f22 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -58,8 +59,9 @@ /** * Statistics about mapping usage. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MappingStats implements ToXContentFragment, Writeable { /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 18098bc31432f..5efec8b876435 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -168,6 +168,9 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, + false, + false, false ); List<ShardStats> shardsStats = new ArrayList<>();
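Before the stored-script requests below, a hedged sketch of reaching the cluster-stats types annotated above; 'client' is assumed to be an existing org.opensearch.client.Client, and the printed fields are illustrative:

    import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse;
    import org.opensearch.client.Client;

    public final class ClusterStatsExample {
        // Fetch cluster-wide stats and print two headline numbers.
        static void printHeadlines(Client client) {
            ClusterStatsResponse stats = client.admin().cluster().prepareClusterStats().get();
            System.out.println("nodes: " + stats.getNodesStats().getCounts().getTotal());
            System.out.println("indices: " + stats.getIndicesStats().getIndexCount());
        }
    }

diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index 7d92162015950..0bb4f3625ddad 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Transport request for deleting stored scripts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredScriptRequest> { private String id; diff --git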
a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index 34e0d429f2098..cbadde386d5a9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for deleting stored scripts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< DeleteStoredScriptRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java index 70384b5fb648e..25bc3ecd6b7ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Transport request for getting stored script * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetStoredScriptRequest extends ClusterManagerNodeReadRequest { protected String id; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java index ae969963be62f..ca0bd32f1f38b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for getting stored script * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetStoredScriptRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetStoredScriptRequest, GetStoredScriptResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index cf5a3ec44e560..a81faff2abb03 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -32,6 +32,7 @@ package 
org.opensearch.action.admin.cluster.storedscripts; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -53,8 +54,9 @@ /** * Transport response for getting stored script * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetStoredScriptResponse extends ActionResponse implements StatusToXContentObject { public static final ParseField _ID_PARSE_FIELD = new ParseField("_id"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index f45bee955da02..8731b18fff338 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; @@ -54,8 +55,9 @@ /** * Transport request for putting stored script * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutStoredScriptRequest extends AcknowledgedRequest implements ToXContentFragment { private String id; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index 2a06cd23c10b6..46773177e9a74 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -35,14 +35,16 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; /** * Transport request builder for putting stored script * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< PutStoredScriptRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java index 83e8b93b32e0f..16103f02be596 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -41,8 +42,9 
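Since the stored-script request, builder, and response types above are now public, a hedged usage sketch may help; the script id and painless source are invented for illustration:

    import org.opensearch.client.Client;
    import org.opensearch.common.xcontent.XContentType;
    import org.opensearch.core.common.bytes.BytesArray;

    public final class StoredScriptExample {
        // Store a script under an id, then read it back.
        static void roundTrip(Client client) {
            String body = "{\"script\": {\"lang\": \"painless\", \"source\": \"doc['views'].value * 2\"}}";
            client.admin()
                .cluster()
                .preparePutStoredScript()
                .setId("double-views")
                .setContent(new BytesArray(body), XContentType.JSON)
                .get();
            System.out.println(client.admin().cluster().prepareGetStoredScript("double-views").get().getSource());
        }
    }
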
@@ /** * Transport request for getting pending cluster tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingClusterTasksRequest extends ClusterManagerNodeReadRequest { public PendingClusterTasksRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java index b5e77f291a701..c932c2e91f314 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for getting pending cluster tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingClusterTasksRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< PendingClusterTasksRequest, PendingClusterTasksResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index 118d2cf3065e6..9f4568c88b273 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.tasks; import org.opensearch.cluster.service.PendingClusterTask; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Transport response for getting pending cluster tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingClusterTasksResponse extends ActionResponse implements Iterable, ToXContentObject { private final List pendingTasks; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java index 94dbb5ff46a02..0b56216790d94 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchGenerationException; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; @@ -54,8 +55,9 @@ /** * Represents an alias, to be associated with an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Alias implements Writeable, ToXContentFragment { private static final ParseField FILTER = new ParseField("filter"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 2899e791604a5..bc27c70282368 100644 --- 
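Rounding out the pending-tasks hunks above: PendingClusterTasksResponse is iterable, so a consumer can walk the queue directly. A hedged sketch assuming an existing Client:

    import org.opensearch.client.Client;
    import org.opensearch.cluster.service.PendingClusterTask;

    public final class PendingTasksExample {
        // Print every cluster-state update still queued on the cluster manager.
        static void printPending(Client client) {
            for (PendingClusterTask task : client.admin().cluster().preparePendingClusterTasks().get()) {
                System.out.println(task.getSource() + " queued for " + task.getTimeInQueue());
            }
        }
    }
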
a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.AliasAction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -72,8 +73,9 @@ /** * A request to add/remove aliases for one or more indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesAliasesRequest extends AcknowledgedRequest implements ToXContentObject { private List allAliasActions = new ArrayList<>(); @@ -95,8 +97,9 @@ public IndicesAliasesRequest() {} /** * Request to take one or more actions on one or more indexes and alias combinations. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AliasActions implements AliasesRequest, Writeable, ToXContentObject { private static final ParseField INDEX = new ParseField("index"); @@ -118,8 +121,9 @@ public static class AliasActions implements AliasesRequest, Writeable, ToXConten /** * The type of request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { ADD((byte) 0, AliasActions.ADD), REMOVE((byte) 1, AliasActions.REMOVE), diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java index 13c57cc781925..d262c9cd42ce9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; import java.util.Map; @@ -42,8 +43,9 @@ /** * Builder for request to modify many aliases at once. 
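IndicesAliasesRequest models add/remove actions that are applied as one cluster-state update, which is what makes alias flips safe. A hedged sketch of the common swap, with made-up index names:

    import org.opensearch.client.Client;

    public final class AliasSwapExample {
        // Repoint the "logs" alias from last year's index to the current one
        // in a single atomic update.
        static void swap(Client client) {
            client.admin()
                .indices()
                .prepareAliases()
                .removeAlias("logs-2023", "logs")
                .addAlias("logs-2024", "logs")
                .get();
        }
    }
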
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder< IndicesAliasesRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java index 28894baa629b3..00d754c8fb029 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -35,6 +35,7 @@ import org.opensearch.action.AliasesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Transport request for listing index aliases * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetAliasesRequest extends ClusterManagerNodeReadRequest implements AliasesRequest { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java index aecbd689a647c..e9a15e9f9dfb3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java @@ -33,12 +33,14 @@ package org.opensearch.action.admin.indices.alias.get; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for listing index aliases * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder { public GetAliasesRequestBuilder(OpenSearchClient client, GetAliasesAction action, String... 
aliases) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java index e50540e6b9aea..71cbbe2c6594f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.alias.get; import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Transport response for listing index aliases * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetAliasesResponse extends ActionResponse { private final Map<String, List<AliasMetadata>> aliases; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java index 274bb8bd15e08..a70f12bed4f1f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.single.shard.SingleShardRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -62,8 +63,9 @@ /** * Transport action for analyzing text * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AnalyzeAction extends ActionType<AnalyzeAction.Response> { public static final AnalyzeAction INSTANCE = new AnalyzeAction(); @@ -77,8 +79,9 @@ private AnalyzeAction() { * A request to analyze text associated with a specific index. Allows providing * the actual analyzer name to perform the analysis with.
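AnalyzeAction.Request and the inner Response/AnalyzeToken types annotated in these hunks are what the analyze API hands back. A hedged sketch, with sample text and the standard analyzer chosen purely for illustration:

    import org.opensearch.action.admin.indices.analyze.AnalyzeAction;
    import org.opensearch.client.Client;

    public final class AnalyzeExample {
        // Tokenize a sample string and print each term with its start offset.
        static void printTokens(Client client) {
            AnalyzeAction.Response response = client.admin()
                .indices()
                .prepareAnalyze("Quick brown fox")
                .setAnalyzer("standard")
                .get();
            for (AnalyzeAction.AnalyzeToken token : response.getTokens()) {
                System.out.println(token.getTerm() + " @ " + token.getStartOffset());
            }
        }
    }
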
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends SingleShardRequest { private String[] text; @@ -308,8 +311,9 @@ public static Request fromXContent(XContentParser parser, String index) throws I /** * Inner Response * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Response extends ActionResponse implements ToXContentObject { private final DetailAnalyzeResponse detail; @@ -404,8 +408,9 @@ static final class Fields { /** * Inner Analyze Token * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AnalyzeToken implements Writeable, ToXContentObject { private final String term; private final int startOffset; @@ -542,8 +547,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Inner Detail Analyze Response * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DetailAnalyzeResponse implements Writeable, ToXContentFragment { private final boolean customAnalyzer; @@ -709,8 +715,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Inner Analyze Token List * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AnalyzeTokenList implements Writeable, ToXContentObject { private final String name; private final AnalyzeToken[] tokens; @@ -783,8 +790,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Inner character filtered text * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CharFilteredText implements Writeable, ToXContentObject { private final String name; private final String[] texts; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java index a7f21b2af16fc..b0240a4db82cd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java @@ -33,14 +33,16 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import java.util.Map; /** * Transport request builder for analyzing text * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder< AnalyzeAction.Request, AnalyzeAction.Response, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index 3b69a0980e97f..57266b5aec58f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Transport request for clearing cache * - * @opensearch.internal + * 
@opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearIndicesCacheRequest extends BroadcastRequest { private boolean queryCache = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index e0513e77a5aa5..074e2ce0b35eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for clearing cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder< ClearIndicesCacheRequest, ClearIndicesCacheResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java index 0c6c5ca27b24d..6fe180d900311 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.cache.clear; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -45,8 +46,9 @@ /** * The response of a clear cache action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearIndicesCacheResponse extends BroadcastResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index 0f98550343a13..e785c31c4a0b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.util.CollectionUtils; @@ -48,8 +49,9 @@ /** * A request to close an index. 
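A hedged sketch of driving the close-index types annotated here; the index name is illustrative, and per-index outcomes come back as CloseIndexResponse.IndexResult:

    import org.opensearch.action.admin.indices.close.CloseIndexResponse;
    import org.opensearch.client.Client;

    public final class CloseIndexExample {
        // Close an index and report whether each per-index result failed.
        static void close(Client client) {
            CloseIndexResponse response = client.admin().indices().prepareClose("old-logs").get();
            System.out.println("acknowledged: " + response.isAcknowledged());
            for (CloseIndexResponse.IndexResult result : response.getIndices()) {
                System.out.println(result.getIndex() + " failed: " + result.hasFailures());
            }
        }
    }
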
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloseIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java index b3b53a0043c70..92c32c9ace490 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for close index request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder { public CloseIndexRequestBuilder(OpenSearchClient client, CloseIndexAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index df6d0e4c9be39..2e0c5cb5842b4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -54,8 +55,9 @@ /** * Transport response for closing an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List indices; @@ -99,8 +101,9 @@ public String toString() { /** * Inner index result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class IndexResult implements Writeable, ToXContentFragment { private final Index index; @@ -199,8 +202,9 @@ public String toString() { /** * Shard Result from Close Index Response * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardResult implements Writeable, ToXContentFragment { private final int id; @@ -257,8 +261,9 @@ public String toString() { /** * Inner Failure if something goes wrong * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends DefaultShardOperationFailedException { private @Nullable String nodeId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index d048a3008f588..e5dbefc3dba97 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -42,6 +42,7 @@ import org.opensearch.action.support.ActiveShardCount; import 
org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentFactory; @@ -80,8 +81,9 @@ * @see org.opensearch.client.Requests#createIndexRequest(String) * @see CreateIndexResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateIndexRequest extends AcknowledgedRequest implements IndicesRequest { public static final ParseField MAPPINGS = new ParseField("mappings"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java index 3493ee06827c1..b233f45422967 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentType; @@ -48,8 +49,9 @@ /** * Builder for a create index request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder< CreateIndexRequest, CreateIndexResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java index 1b3ad48402eed..3258ffd8672a1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.create; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -49,8 +50,9 @@ /** * A response for a create index action. 
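CreateIndexResponse extends ShardsAcknowledgedResponse, so callers can distinguish a cluster-state ack from active primaries. A hedged sketch with illustrative settings:

    import org.opensearch.action.admin.indices.create.CreateIndexResponse;
    import org.opensearch.client.Client;
    import org.opensearch.common.settings.Settings;

    public final class CreateIndexExample {
        // Create a single-shard index, then check both acknowledgement levels.
        static void create(Client client) {
            CreateIndexResponse response = client.admin()
                .indices()
                .prepareCreate("my-index")
                .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
                .get();
            System.out.println("acked: " + response.isAcknowledged() + ", shards active: " + response.isShardsAcknowledged());
        }
    }
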
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateIndexResponse extends ShardsAcknowledgedResponse { private static final ParseField INDEX = new ParseField("index"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index 0443325f82778..c5c03c93785d2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ * Represents a request to delete a particular dangling index, specified by its UUID. The {@link #acceptDataLoss} * flag must also be explicitly set to true, or later validation will fail. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteDanglingIndexRequest extends AcknowledgedRequest { private final String indexUUID; private final boolean acceptDataLoss; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index 590f08a82c1d2..2702b6a05c4bb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * by its UUID. The {@link #acceptDataLoss} flag must also be * explicitly set to true, or later validation will fail. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ImportDanglingIndexRequest extends AcknowledgedRequest<ImportDanglingIndexRequest> { private final String indexUUID; private final boolean acceptDataLoss; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java index adbfe9b760ed8..119c4acbf4160 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.dangling.list; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Transport request for listing dangling indices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListDanglingIndicesRequest extends BaseNodesRequest<ListDanglingIndicesRequest> { /** * Filter the response by index UUID. Leave as null to find all indices. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index 384ae2e028bba..be63bee6312fe 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.indices.dangling.DanglingIndexInfo; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -58,8 +59,9 @@ * information for each dangling index is presented under the "dangling_indices" key. If any nodes * in the cluster failed to answer, the details are presented under the "_nodes.failures" key.
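As the javadoc above stresses, the dangling-index requests force callers to name an index UUID and to opt in to data loss explicitly. A hedged, construction-only sketch; the exact constructor shapes are assumed from the fields shown in these hunks:

    import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest;
    import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest;

    public final class DanglingIndexExample {
        // acceptDataLoss must be true or validation rejects the request.
        static DeleteDanglingIndexRequest deleteRequest(String indexUUID) {
            return new DeleteDanglingIndexRequest(indexUUID, true);
        }

        // A null UUID filter lists dangling indices across all nodes.
        static ListDanglingIndicesRequest listRequest(String indexUUIDOrNull) {
            return new ListDanglingIndicesRequest(indexUUIDOrNull);
        }
    }
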
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListDanglingIndicesResponse extends BaseNodesResponse implements StatusToXContentObject { public ListDanglingIndicesResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java index 14b29c4b2bbba..4c1690d25fbd9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.metadata.MetadataCreateDataStreamService; import org.opensearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; @@ -61,8 +62,9 @@ /** * Transport action for creating a datastream * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateDataStreamAction extends ActionType { public static final CreateDataStreamAction INSTANCE = new CreateDataStreamAction(); @@ -75,8 +77,9 @@ private CreateDataStreamAction() { /** * Request for Creating Data Stream * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends AcknowledgedRequest implements IndicesRequest { private final String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java index 190194e071bc4..6b0aec6a31839 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java @@ -53,6 +53,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.TimeValue; @@ -78,8 +79,9 @@ /** * Transport action for deleting a datastream * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteDataStreamAction extends ActionType { private static final Logger logger = LogManager.getLogger(DeleteDataStreamAction.class); @@ -94,8 +96,9 @@ private DeleteDataStreamAction() { /** * Request for deleting data streams * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable { private String[] names; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java index 6a7967d31653c..1db4e85887c23 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java @@ -51,6 +51,7 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; @@ -76,8 +77,9 @@ /** * Transport action for getting a datastream * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetDataStreamAction extends ActionType { public static final GetDataStreamAction INSTANCE = new GetDataStreamAction(); @@ -90,8 +92,9 @@ private GetDataStreamAction() { /** * Request for getting data streams * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] names; @@ -156,16 +159,18 @@ public IndicesRequest indices(String... indices) { /** * Response for getting data streams * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Response extends ActionResponse implements ToXContentObject { public static final ParseField DATASTREAMS_FIELD = new ParseField("data_streams"); /** * Data streams information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DataStreamInfo extends AbstractDiffable implements ToXContentObject { public static final ParseField STATUS_FIELD = new ParseField("status"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java index 006da6b3cbb09..5fbefbc6e1591 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.util.CollectionUtils; @@ -47,8 +48,9 @@ /** * A request to delete an index. Best created with {@link org.opensearch.client.Requests#deleteIndexRequest(String)}. 
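With `DeleteIndexRequest` promoted to public API, downstream code such as a plugin can build and execute it directly. A hedged usage sketch: the `deleteIndex` helper is an illustrative name, and `client` is assumed to be an `org.opensearch.client.Client` obtained from the node or plugin context.

```java
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.support.master.AcknowledgedResponse;
import org.opensearch.client.Client;

// Deletes a single index and blocks until the cluster manager acknowledges it.
static boolean deleteIndex(Client client, String indexName) {
    DeleteIndexRequest request = new DeleteIndexRequest(indexName);
    AcknowledgedResponse response = client.admin().indices().delete(request).actionGet();
    return response.isAcknowledged();
}
```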
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 33f6342e94139..6cf0920f8570f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for deleting an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder< DeleteIndexRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index f9bd849e549b7..91fde3ec62d7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Transport request for checking if an index exists * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesExistsRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index 8459bbd8b874e..2e0f28cb7e3f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for checking if an index exists * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesExistsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< IndicesExistsRequest, IndicesExistsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java index 
4ac85492229f0..a457cca74f897 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.exists.indices; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Transport response for checking if an index exists * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesExistsResponse extends ActionResponse { private boolean exists; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java index c8b28efc5f294..f8cf6ab72e038 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -52,8 +53,9 @@ * @see org.opensearch.client.IndicesAdminClient#flush(FlushRequest) * @see FlushResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushRequest extends BroadcastRequest { private boolean force = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java index d0cbd1d27fba6..50d7a78c919f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for flushing one or more indices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { public FlushRequestBuilder(OpenSearchClient client, FlushAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java index 4135654e66271..3881a839a6dcd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.flush; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -45,8 +46,9 @@ /** * A response to flush action. 
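The `force` flag visible in the `FlushRequest` hunk above makes the flush run even when nothing is uncommitted. A sketch under the same assumption about `client`; `forceFlush` is an illustrative name:

```java
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.client.Client;

// Forces a flush and waits for any flush already in progress rather than failing.
static FlushResponse forceFlush(Client client, String... indices) {
    FlushRequest request = new FlushRequest(indices).force(true).waitIfOngoing(true);
    return client.admin().indices().flush(request).actionGet();
}
```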
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushResponse extends BroadcastResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("flush", true, arg -> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 89e5a57094a96..f38b49f434261 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastRequest; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.index.engine.Engine; @@ -54,8 +55,9 @@ * @see org.opensearch.client.IndicesAdminClient#forceMerge(ForceMergeRequest) * @see ForceMergeResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ForceMergeRequest extends BroadcastRequest { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index cff05f194cac4..d8a618a1828ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A request to force merge one or more indices. In order to force merge all @@ -42,8 +43,9 @@ * merge down to. By default, will cause the force merge process to merge down * to half the configured number of segments. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder< ForceMergeRequest, ForceMergeResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java index c5316f3375ad5..e6a7fe0025b87 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.forcemerge; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -47,8 +48,9 @@ /** * A response for force merge action. 
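`ForceMergeRequest` carries a `maxNumSegments` target, so a caller can merge an index down to a fixed segment count. A blocking sketch with the same assumed `client`:

```java
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.client.Client;

// Merges the index down to a single segment. actionGet() blocks, so real code
// would usually pass an ActionListener instead for a long-running merge.
static ForceMergeResponse mergeToOneSegment(Client client, String index) {
    ForceMergeRequest request = new ForceMergeRequest(index).maxNumSegments(1);
    return client.admin().indices().forceMerge(request).actionGet();
}
```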
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ForceMergeResponse extends BroadcastResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index 0b2084865e23e..47c59791edf04 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,14 +44,16 @@ /** * A request to retrieve information about an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexRequest extends ClusterInfoRequest { /** * The features to get. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Feature { ALIASES((byte) 0), MAPPINGS((byte) 1), diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index 3019191e5570e..e97319abe5f98 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to get information about an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder { public GetIndexRequestBuilder(OpenSearchClient client, GetIndexAction action, String... indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index c135b511c3315..5a237b8d3470f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -57,8 +58,9 @@ /** * A response for a get index action. 
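`GetIndexRequest.Feature`, also made public in this hunk, lets callers limit the response to the pieces they need. A sketch that fetches only mappings and settings; `describeIndex` is an illustrative name:

```java
import org.opensearch.action.admin.indices.get.GetIndexRequest;
import org.opensearch.action.admin.indices.get.GetIndexResponse;
import org.opensearch.client.Client;

// Restricts the response to mappings and settings, skipping alias information.
static GetIndexResponse describeIndex(Client client, String index) {
    GetIndexRequest request = new GetIndexRequest().indices(index)
        .features(GetIndexRequest.Feature.MAPPINGS, GetIndexRequest.Feature.SETTINGS);
    return client.admin().indices().getIndex(request).actionGet();
}
```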
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexResponse extends ActionResponse implements ToXContentObject { private Map mappings = Map.of(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index 6a1d04efa9714..f18f973c07959 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable { protected boolean local = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java index ebc0c015c5140..d379bfbecafd0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java @@ -35,13 +35,15 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * A helper class to build {@link GetFieldMappingsRequest} objects * - * @opensearch.internal - **/ + * @opensearch.api + */ +@PublicApi(since = "1.0.0") public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder { public GetFieldMappingsRequestBuilder(OpenSearchClient client, GetFieldMappingsAction action, String... indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index 94c88e30295a8..86533f14e83e1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -65,8 +66,9 @@ * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetFieldMappingsResponse extends ActionResponse implements ToXContentObject { private static final ParseField MAPPINGS = new ParseField("mappings"); @@ -178,8 +180,9 @@ private void addFieldMappingsToBuilder(XContentBuilder builder, Params params, M /** * Metadata for field mappings for toXContent * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class FieldMappingMetadata implements ToXContentFragment { private static final ParseField FULL_NAME = new ParseField("full_name"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index 3988b0dd5a508..cd0ecdb30e5fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -41,8 +42,9 @@ /** * Transport request to get index mappings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetMappingsRequest extends ClusterInfoRequest { public GetMappingsRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 85bf8c2ffd9c6..36ca1cb088cb5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to get index mappings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder< GetMappingsRequest, GetMappingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java index e59f08c4fa162..56c979c20b6d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -52,8 +53,9 @@ /** * Transport response to get index mappings.
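A corresponding sketch for the mapping APIs above, again assuming an injected `client`; the response keys `MappingMetadata` by index name:

```java
import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest;
import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.opensearch.client.Client;

// Retrieves index mappings; GetMappingsResponse#mappings() is keyed by index name.
static GetMappingsResponse mappingsOf(Client client, String... indices) {
    GetMappingsRequest request = new GetMappingsRequest().indices(indices);
    return client.admin().indices().getMappings(request).actionGet();
}
```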
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetMappingsResponse extends ActionResponse implements ToXContentFragment { private static final ParseField MAPPINGS = new ParseField("mappings"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 32f751ceb1c5a..8122db3278795 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; @@ -75,8 +76,9 @@ * @see org.opensearch.client.IndicesAdminClient#putMapping(PutMappingRequest) * @see AcknowledgedResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { private static final Set RESERVED_FIELDS = Set.of( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 9d703bf428c8e..d44b243bb0edb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,8 +46,9 @@ /** * Builder for a put mapping request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder< PutMappingRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java index 16451e311e7d3..f48ec1ae6fb71 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.util.CollectionUtils; @@ -49,8 +50,9 @@ /** * A request to open an index. 
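For the open-index request above, a hedged sketch; `reopen` is an illustrative name and `client` is the same assumed `org.opensearch.client.Client`:

```java
import org.opensearch.action.admin.indices.open.OpenIndexRequest;
import org.opensearch.action.admin.indices.open.OpenIndexResponse;
import org.opensearch.action.support.ActiveShardCount;
import org.opensearch.client.Client;

// Re-opens a closed index and waits for the default of one active copy per shard.
static OpenIndexResponse reopen(Client client, String index) {
    OpenIndexRequest request = new OpenIndexRequest(index).waitForActiveShards(ActiveShardCount.DEFAULT);
    return client.admin().indices().open(request).actionGet();
}
```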
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java index bf09c3f173491..19770255b0ee1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for an open index request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder { public OpenIndexRequestBuilder(OpenSearchClient client, OpenIndexAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java index bd96a1071c129..78af1abc3ce31 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.open; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -43,8 +44,9 @@ /** * A response for an open index action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenIndexResponse extends ShardsAcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java index b5097f96fe52b..1fb8514cbf48c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.util.CollectionUtils; @@ -49,8 +50,9 @@ /** * A request to add a block to an index.
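A sketch of adding a write block through this request. Because I am not certain `IndicesAdminClient` exposes a dedicated helper for this action, the generic `execute` dispatch with `AddIndexBlockAction.INSTANCE` is used; `blockWrites` is an illustrative name:

```java
import org.opensearch.action.admin.indices.readonly.AddIndexBlockAction;
import org.opensearch.action.admin.indices.readonly.AddIndexBlockRequest;
import org.opensearch.action.admin.indices.readonly.AddIndexBlockResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;

// Marks the index read-only for writes; the response reports per-shard results.
static AddIndexBlockResponse blockWrites(Client client, String index) {
    AddIndexBlockRequest request = new AddIndexBlockRequest(APIBlock.WRITE, index);
    return client.execute(AddIndexBlockAction.INSTANCE, request).actionGet();
}
```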
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AddIndexBlockRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private final APIBlock block; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java index 8322ba19f433e..ebcdf700d3b6e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.common.annotation.PublicApi; /** * Builder for an add index block request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AddIndexBlockRequestBuilder extends AcknowledgedRequestBuilder< AddIndexBlockRequest, AddIndexBlockResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 13cee3f8e0159..3ab64fa55af8b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -54,8 +55,9 @@ /** * Transport response for adding an index block.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AddIndexBlockResponse extends ShardsAcknowledgedResponse { private final List indices; @@ -99,8 +101,9 @@ public String toString() { /** * Result for adding a block * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AddBlockResult implements Writeable, ToXContentFragment { private final Index index; @@ -199,8 +202,9 @@ public String toString() { /** * Per shard result for adding a block * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AddBlockShardResult implements Writeable, ToXContentFragment { private final int id; @@ -258,8 +262,9 @@ public String toString() { /** * Contains failure information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends DefaultShardOperationFailedException { private @Nullable String nodeId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java index 69b20b697dd9a..aca98a6d9c571 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Request for recovery information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryRequest extends BroadcastRequest { private boolean detailed = false; // Provides extra details in the response diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java index 99a1fb430fb28..2f44a5f2df04a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Recovery information request builder. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryRequestBuilder extends BroadcastOperationRequestBuilder { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java index e6440fd95aa39..27b6b334ef4b4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.recovery; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -48,8 +49,9 @@ /** * Information regarding the recovery state of indices and their associated shards. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryResponse extends BroadcastResponse { private final Map> shardRecoveryStates; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java index c6e230cc66373..c8000cbc40da8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.refresh; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -46,8 +47,9 @@ * @see org.opensearch.client.IndicesAdminClient#refresh(RefreshRequest) * @see RefreshResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshRequest extends BroadcastRequest { public RefreshRequest(String... indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java index 5b27ae13f24be..ebafc726bfd39 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A refresh request making all operations performed since the last refresh available for search. The (near) real-time * capabilities depend on the index engine used. For example, the internal one requires refresh to be called, but by * default a refresh is scheduled periodically.
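A refresh sketch to pair with the javadoc above, again with an assumed `client`; useful for read-your-writes checks in tests:

```java
import org.opensearch.action.admin.indices.refresh.RefreshRequest;
import org.opensearch.action.admin.indices.refresh.RefreshResponse;
import org.opensearch.client.Client;

// Makes everything indexed so far visible to search before a follow-up query.
static RefreshResponse refreshNow(Client client, String... indices) {
    return client.admin().indices().refresh(new RefreshRequest(indices)).actionGet();
}
```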
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { public RefreshRequestBuilder(OpenSearchClient client, RefreshAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java index 4c89962dfec5e..30351b8983717 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.refresh; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -45,8 +46,9 @@ /** * The response of a refresh action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshResponse extends BroadcastResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("refresh", true, arg -> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java index b82b68f6f9489..9a913c6bcafff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -19,8 +20,9 @@ /** * Request for Segment Replication stats information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentReplicationStatsRequest extends BroadcastRequest { private boolean detailed = false; // Provides extra details in the response private boolean activeOnly = false; // Only reports on active segment replication events diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java index 7e68d2ac59f07..9f00bff414cf5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Segment Replication stats information request builder. 
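For the segment replication stats request above, a sketch assuming the conventional companion action class (`SegmentReplicationStatsAction.INSTANCE`) that these request/response/builder triples normally come with; the `activeOnly` and `detailed` setters are inferred from the fields shown in the hunk and should be treated as assumptions:

```java
import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsAction;
import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequest;
import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
import org.opensearch.client.Client;

// Reports only ongoing replication events, with per-shard detail.
static SegmentReplicationStatsResponse activeReplication(Client client, String... indices) {
    SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(indices);
    request.activeOnly(true);
    request.detailed(true);
    return client.execute(SegmentReplicationStatsAction.INSTANCE, request).actionGet();
}
```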
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentReplicationStatsRequestBuilder extends BroadcastOperationRequestBuilder< SegmentReplicationStatsRequest, SegmentReplicationStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java index f1b6b90e0cae1..e65e13a945abd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.indices.replication; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -24,8 +25,9 @@ /** * Stats Information regarding the Segment Replication state of indices and their associated shards. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentReplicationStatsResponse extends BroadcastResponse { private final Map> replicationStats; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java index 98c2446e17998..e20e4c2d868a8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.ParseField; @@ -82,8 +83,9 @@ /** * Transport action to resolve an index. 
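`ResolveIndexAction` nests its own `Request` and `Response`, so it is dispatched through the generic `execute` call. A hedged sketch; `resolve` is an illustrative name:

```java
import org.opensearch.action.admin.indices.resolve.ResolveIndexAction;
import org.opensearch.client.Client;

// Expands a wildcard expression into concrete indices, aliases, and data streams.
static ResolveIndexAction.Response resolve(Client client, String expression) {
    ResolveIndexAction.Request request = new ResolveIndexAction.Request(new String[] { expression });
    return client.execute(ResolveIndexAction.INSTANCE, request).actionGet();
}
```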
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResolveIndexAction extends ActionType { public static final ResolveIndexAction INSTANCE = new ResolveIndexAction(); @@ -96,8 +98,9 @@ private ResolveIndexAction() { /** * Request for resolving an index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ActionRequest implements IndicesRequest.Replaceable { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen(); @@ -196,8 +199,9 @@ public String getName() { /** * The resolved index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResolvedIndex extends ResolvedIndexAbstraction implements Writeable, ToXContentObject { static final ParseField ALIASES_FIELD = new ParseField("aliases"); @@ -284,8 +288,9 @@ public int hashCode() { /** * The resolved index alias * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResolvedAlias extends ResolvedIndexAbstraction implements Writeable, ToXContentObject { static final ParseField INDICES_FIELD = new ParseField("indices"); @@ -346,8 +351,9 @@ public int hashCode() { /** * The resolved data stream * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResolvedDataStream extends ResolvedIndexAbstraction implements Writeable, ToXContentObject { static final ParseField BACKING_INDICES_FIELD = new ParseField("backing_indices"); @@ -418,8 +424,9 @@ public int hashCode() { /** * Response for resolving an index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Response extends ActionResponse implements ToXContentObject { static final ParseField INDICES_FIELD = new ParseField("indices"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java index e014d6d703500..4d0b7fc8c13c7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.rollover; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; @@ -42,8 +43,9 @@ /** * Base class for rollover request conditions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Condition implements NamedWriteable, ToXContentFragment { protected T value; @@ -96,8 +98,9 @@ public String name() { /** * Holder for index stats used to evaluate conditions * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Stats { public final long numDocs; public final long indexCreated; @@ -113,8 +116,9 @@ public Stats(long numDocs, long indexCreated, ByteSizeValue indexSize) { /** * Holder for evaluated condition result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Result { public final Condition condition; public final boolean matched; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java 
b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java index 475e44e1820d5..75c68350e2204 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -52,8 +53,9 @@ /** * Class for holding Rollover related information within an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverInfo extends AbstractDiffable implements Writeable, ToXContentFragment { public static final ParseField CONDITION_FIELD = new ParseField("met_conditions"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index 353cdbbbc840c..68c0076bbd302 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -58,8 +59,9 @@ * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverRequest extends AcknowledgedRequest implements IndicesRequest { private static final ObjectParser PARSER = new ObjectParser<>("rollover"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java index 9e6b8518e92d3..acac7102edbc7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeValue; @@ -42,8 +43,9 @@ /** * Transport request to rollover an index. 
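The rollover request pairs an alias with the `Condition` subclasses shown earlier. A hedged sketch; the alias name and thresholds are illustrative, and `client` is the same assumed `org.opensearch.client.Client`:

```java
import org.opensearch.action.admin.indices.rollover.RolloverRequest;
import org.opensearch.action.admin.indices.rollover.RolloverResponse;
import org.opensearch.client.Client;
import org.opensearch.common.unit.TimeValue;

// Rolls the write index behind "logs-write" over once it is a day old or holds 1M docs;
// passing null for the new index name lets the server derive it from the source index.
static RolloverResponse rolloverIfDue(Client client) {
    RolloverRequest request = new RolloverRequest("logs-write", null);
    request.addMaxIndexAgeCondition(TimeValue.timeValueDays(1));
    request.addMaxIndexDocsCondition(1_000_000);
    return client.admin().indices().rolloverIndex(request).actionGet();
}
```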
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< RolloverRequest, RolloverResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index a66fcc9e9bcf2..b7df35cd480bb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.rollover; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,8 +56,9 @@ * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RolloverResponse extends ShardsAcknowledgedResponse implements ToXContentObject { private static final ParseField NEW_INDEX = new ParseField("new_index"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java index 7249bc5e9d3ba..4b37da2c99850 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java @@ -32,6 +32,8 @@ package org.opensearch.action.admin.indices.segments; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -41,8 +43,9 @@ /** * List of Index Segments * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexSegments implements Iterable { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java index a6caf0649fde1..8fdc050511050 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.segments; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.util.Arrays; @@ -40,8 +41,9 @@ /** * List of Index Shard Segments * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardSegments implements Iterable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index 36f5979552011..648f58dada4f9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -37,6 +37,7 @@ import 
org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,8 +56,9 @@ /** * Transport response for retrieving indices segment information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesSegmentResponse extends BroadcastResponse { private final ShardSegments[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java index 03a41cd21572f..aff2b383df08f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.segments; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Transport request for retrieving indices segment information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesSegmentsRequest extends BroadcastRequest { protected boolean verbose = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java index 4b758e1f4bfb1..579b6d997acd9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for retrieving indices segment information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesSegmentsRequest, IndicesSegmentResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java index a4dc901544e0b..84edec384b68a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -25,7 +26,10 @@ /** * Transport request for retrieving PITs segment 
information + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class PitSegmentsRequest extends BroadcastRequest { private boolean verbose = false; private final List pitIds = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java index 90317542244ff..09adda4d79108 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.segments; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Collection of shard segments * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSegments implements Writeable, Iterable { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java index 01383b6b6545d..547cfa8c3bce3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * Transport request for getting index settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSettingsRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java index 84cd4e8682e93..5ba42c05dccf0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java @@ -35,13 +35,15 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Transport request builder for getting index settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSettingsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetSettingsRequest, GetSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java
b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java index 61c9b68629194..695c98684f0f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.settings.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.action.ActionResponse; @@ -53,8 +54,9 @@ /** * Transport response for getting index settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSettingsResponse extends ActionResponse implements ToXContentObject { private final Map indexToSettings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 43571dc8220f9..45172e313dfcc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -60,8 +61,9 @@ /** * Request for an update index settings action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateSettingsRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 7501f0c7798de..08d7a240aa007 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Builder for an update index settings request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< UpdateSettingsRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java index bc9633f2bd2db..b986ef3c62e73 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -37,12 +37,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.annotation.PublicApi; /** * Request builder for {@link IndicesShardStoresRequest} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesShardStoreRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< IndicesShardStoresRequest, IndicesShardStoresResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java index d3261bea68f38..ea07325c35c3b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Request for {@link IndicesShardStoresAction} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesShardStoresRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 98132b25a6d99..c2f373fcbd6a1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -53,15 +54,17 @@ * Consists of {@link StoreStatus}s for requested indices grouped by * indices and shard ids and a list of encountered node {@link Failure}s * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesShardStoresResponse extends ActionResponse implements ToXContentFragment { /** * Shard store information from a node * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class StoreStatus implements Writeable, ToXContentFragment, Comparable { private final DiscoveryNode node; private final String allocationId; @@ -70,7 +73,10 @@ public static class StoreStatus implements Writeable, ToXContentFragment, Compar /** * The status of the shard store with respect to the cluster + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum AllocationStatus { /** @@ -232,8 +238,9 @@ public int compareTo(StoreStatus other) { /** * Single 
node failure while retrieving shard store information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends DefaultShardOperationFailedException { private String nodeId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index a24a33006ac1e..a5225f2243876 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -40,6 +40,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -57,8 +58,9 @@ /** * Request class to shrink an index into a single shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResizeRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { public static final ObjectParser PARSER = new ObjectParser<>("resize_request"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 855e678c77b9b..f9d90d46b0904 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -36,14 +36,16 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; /** * Transport request builder for resizing an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { public ResizeRequestBuilder(OpenSearchClient client, ActionType action) { super(client, action, new ResizeRequest()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java index 1aa09023e3583..a4801f84c9ef9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.shrink; import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; @@ -42,8 +43,9 @@ /** * A response for a resize index action, either shrink or split index. 
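// Usage sketch, not part of this patch: ResizeRequest/ResizeType now carry @PublicApi,
// marking them as supported surface since the stated version. Assuming "client" is an
// org.opensearch.client.Client, a shrink through the existing builder might look like
// this (index names and shard count are placeholders):
ResizeResponse resized = client.admin()
    .indices()
    .prepareResizeIndex("logs-2023", "logs-2023-small")   // source index, target index
    .setResizeType(ResizeType.SHRINK)
    .setSettings(Settings.builder().put("index.number_of_shards", 1).build())
    .get();
assert resized.isAcknowledged();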
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ResizeResponse extends CreateIndexResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java index 6403ed735ae49..91bcc0d62b1c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java @@ -32,11 +32,14 @@ package org.opensearch.action.admin.indices.shrink; +import org.opensearch.common.annotation.PublicApi; + /** * The type of the resize operation * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ResizeType { SHRINK, SPLIT, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java index e4abaef4ddfa8..8bfeb13b253c3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java @@ -34,6 +34,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -68,8 +69,9 @@ /** * Common Stats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CommonStats implements Writeable, ToXContentFragment { @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 7503020d1c8ef..a7d9f95b80f7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.stats; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Common Stats Flags for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CommonStatsFlags implements Writeable, Cloneable { public static final CommonStatsFlags ALL = new CommonStatsFlags().all(); @@ -255,8 +257,9 @@ public CommonStatsFlags clone() { /** * The flags. 
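// Sketch, not part of this patch: with the Flag enum public, callers can scope stats
// collection to specific sections instead of using CommonStatsFlags.ALL.
CommonStatsFlags flags = new CommonStatsFlags(CommonStatsFlags.Flag.Store, CommonStatsFlags.Flag.Indexing);
flags.set(CommonStatsFlags.Flag.Indexing, false);   // toggle a single section back off
assert flags.isSet(CommonStatsFlags.Flag.Store);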
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Flag { Store("store", 0), Indexing("indexing", 1), diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java index 1635ce0bf83fc..b0143d9491087 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * IndexShardStats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardStats implements Iterable, Writeable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java index 1c57ca39576b0..09614ea801193 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java @@ -32,6 +32,8 @@ package org.opensearch.action.admin.indices.stats; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -41,8 +43,9 @@ /** * Index Stats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexStats implements Iterable { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java index 54f3e9b7d1a24..2b64464a76899 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.stats; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. 
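// Sketch, not part of this patch: the clear-then-enable pattern the IndicesStatsRequest
// javadoc above describes, expressed through the stats builder ("client" and the index
// name "my-index" are placeholder assumptions):
IndicesStatsResponse stats = client.admin()
    .indices()
    .prepareStats("my-index")
    .clear()         // drop the default stat sections
    .setDocs(true)   // then opt back into just docs + store
    .setStore(true)
    .get();
long docCount = stats.getIndex("my-index").getTotal().getDocs().getCount();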
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesStatsRequest extends BroadcastRequest { private CommonStatsFlags flags = new CommonStatsFlags(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index c211812b32c48..acc085a96a896 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** @@ -45,8 +46,9 @@ * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesStatsRequest, IndicesStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java index 6f051fa19c99f..6242081cd2371 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -54,8 +55,9 @@ /** * Transport response for retrieving indices stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesStatsResponse extends BroadcastResponse { private ShardStats[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java index 77562fa19b319..4ed1ce95b7de2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -49,8 +50,9 @@ /** * Shard Stats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardStats implements Writeable, ToXContentFragment { private ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java index 85524bddc56d8..e93a428b0af26 
100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java @@ -33,6 +33,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * A request to delete an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexTemplateRequest extends ClusterManagerNodeRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 4a990b7837120..60771cfa453ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for deleting an index template * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteIndexTemplateRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index 18142eb7b787d..fa45efdc53124 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -33,6 +33,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Request that allows to retrieve index templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexTemplatesRequest extends ClusterManagerNodeReadRequest { private String[] names; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 09de1733239fc..f8c02b4c8be08 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -33,12 +33,14 @@ import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to retrieve one or more Index templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexTemplatesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetIndexTemplatesRequest, GetIndexTemplatesResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 761a345b49538..009a73b615e0c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.template.get; import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -49,8 +50,9 @@ /** * Response for retrieving one or more Index templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexTemplatesResponse extends ActionResponse implements ToXContentObject { private final List indexTemplates; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 028275778538e..c4396f22d7c16 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ * Transport Request for handling simulating an index template either by name (looking it up in the * cluster state), or by a provided template configuration * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulateIndexTemplateRequest extends ClusterManagerNodeReadRequest { private String indexName; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 8a370a6845db6..c6a9d3530a8cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.metadata.Template; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -50,8 +51,9 @@ /** * Contains the information on what V2 templates would match a given index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulateIndexTemplateResponse extends ActionResponse implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java index 1979cc39e32fb..ed209e18b64ef 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -54,8 +55,9 @@ /** * An action for putting a composable template into the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutComposableIndexTemplateAction extends ActionType { public static final PutComposableIndexTemplateAction INSTANCE = new PutComposableIndexTemplateAction(); @@ -68,8 +70,9 @@ private PutComposableIndexTemplateAction() { /** * A request for putting a single index template into the cluster state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ClusterManagerNodeRequest implements IndicesRequest { private final String name; @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 4f7a28a03e6fb..d4e9200508bfa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -41,6 +41,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; @@ -78,8 +79,9 @@ /** * A request to create an index template. 
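// Sketch, not part of this patch: assembling a legacy index template request now that
// the type is public API (template name, pattern, and settings are placeholders):
PutIndexTemplateRequest template = new PutIndexTemplateRequest("logs-template")
    .patterns(Collections.singletonList("logs-*"))
    .order(0)
    .settings(Settings.builder().put("index.number_of_shards", 1));
AcknowledgedResponse ack = client.admin().indices().putTemplate(template).actionGet();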
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutIndexTemplateRequest extends ClusterManagerNodeRequest implements IndicesRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 04d2236e00e8f..931d12de574ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; @@ -47,8 +48,9 @@ /** * A request builder for putting an index template into the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< PutIndexTemplateRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java index 32cf6d4bfe70d..e760067e9f5be 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.upgrade.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.util.Arrays; @@ -40,8 +41,9 @@ /** * Status for an Index Shard Upgrade * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardUpgradeStatus implements Iterable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java index 4513a321e2a51..fd8ddc1293aaf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java @@ -32,6 +32,8 @@ package org.opensearch.action.admin.indices.upgrade.get; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -41,8 +43,9 @@ /** * Status for an Index Upgrade * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexUpgradeStatus implements Iterable { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java index 57fb2513faf78..783b44ba6570d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.broadcast.BroadcastShardResponse; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Status for a Shard Upgrade * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardUpgradeStatus extends BroadcastShardResponse { private ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java index 987661d2129ac..2584ab6b370da 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.upgrade.get; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -41,8 +42,9 @@ /** * Transport Request for retrieving status of upgrading an Index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeStatusRequest extends BroadcastRequest { public UpgradeStatusRequest() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java index c698c38fe12d5..ac5f881c35dc5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport Request Builder for retrieving status of upgrading an Index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder< UpgradeStatusRequest, UpgradeStatusResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 20dbd12287231..ba2915ee4ddf1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.upgrade.get; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Transport Response for retrieving status of upgrading an Index * - * @opensearch.internal + * 
@opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeStatusResponse extends BroadcastResponse { private ShardUpgradeStatus[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java index 98c307c37ea54..4df02ad7fa8c0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.upgrade.post; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ * @see org.opensearch.client.IndicesAdminClient#upgrade(UpgradeRequest) * @see UpgradeResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeRequest extends BroadcastRequest { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java index 8203f9d51b8e4..bf316504dc920 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or * {@code null} for the indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder { public UpgradeRequestBuilder(OpenSearchClient client, UpgradeAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java index d08cc65832f8f..4bdf41dabba37 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +48,9 @@ /** * A response for the upgrade action. 
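// Sketch, not part of this patch: driving the upgrade action through the
// IndicesAdminClient#upgrade entry point named in the javadoc above ("old-index" is a
// placeholder); the response reports per-index upgrade versions.
UpgradeResponse upgraded = client.admin().indices().upgrade(new UpgradeRequest("old-index")).actionGet();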
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeResponse extends BroadcastResponse { private final Map> versions; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java index 248cab5e40eaf..84aa687af0fe1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.validate.query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Query Explanation * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryExplanation implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java index ef0d14502af23..94bec696dd2c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,8 +54,9 @@ *

    * The request requires the query to be set using {@link #query(QueryBuilder)} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject { private QueryBuilder query = new MatchAllQueryBuilder(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index 6209f41d88be2..de4cf5ae2b904 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; /** * Transport Request Builder to Validate a Query * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder< ValidateQueryRequest, ValidateQueryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 44d6d637cdc51..791128491f33d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.validate.query; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -52,8 +53,9 @@ /** * The response of the validate action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValidateQueryResponse extends BroadcastResponse { public static final String VALID_FIELD = "valid"; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 20dea872e0ad2..81e8940f1b505 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -41,6 +41,7 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; @@ -68,8 +69,9 @@ * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id * of the relevant action, and if it has failed or not (with the failure message in case it failed). 
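// Sketch, not part of this patch: the per-item inspection the BulkItemResponse javadoc
// above describes; a bulk response is iterated and failed items surface their failure
// message ("client", "bulkRequest", and "logger" are assumed to exist in the caller):
BulkResponse bulk = client.bulk(bulkRequest).actionGet();
if (bulk.hasFailures()) {
    for (BulkItemResponse item : bulk) {
        if (item.isFailed()) {
            logger.warn("bulk item {} ({}) failed: {}", item.getItemId(), item.getOpType(), item.getFailureMessage());
        }
    }
}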
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkItemResponse implements Writeable, StatusToXContentObject { private static final String _INDEX = "_index"; @@ -179,8 +181,9 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw /** * Represents a failure. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; public static final String ID_FIELD = "id"; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index f2f3077001a13..47abd0337fcf9 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -45,6 +45,7 @@ import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; @@ -71,8 +72,9 @@ * Note that we only support refresh on the bulk request not per item. * @see org.opensearch.client.Client#bulk(BulkRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest, Accountable { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkRequest.class); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java index 08eee82a53cf9..a165d186d3878 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java @@ -44,6 +44,7 @@ import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.MediaType; @@ -52,8 +53,9 @@ * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes * it in a single batch. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { public BulkRequestBuilder(OpenSearchClient client, BulkAction action, @Nullable String globalIndex) { diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java index 655e65bc22138..6b70e2acd41d2 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.bulk; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.action.ActionResponse; @@ -56,8 +57,9 @@ * bulk requests. 
Each item holds the index/type/id is operated on, and if it failed or not (with the * failure message). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkResponse extends ActionResponse implements Iterable, StatusToXContentObject { private static final String ITEMS = "items"; diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 726ba7ba119af..4a9b07c12821d 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -85,6 +85,11 @@ import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -133,6 +138,7 @@ public class TransportBulkAction extends HandledTransportAction() { - @Override - public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { - bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); - } - docStatusStats.inc(bulkItemResponse.status()); - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); - } + final Span span = tracer.startSpan(SpanBuilder.from("bulkShardAction", nodeId, bulkShardRequest)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + shardBulkAction.execute( + bulkShardRequest, + TraceableActionListener.create(ActionListener.runBefore(new ActionListener() { + @Override + public void onResponse(BulkShardResponse bulkShardResponse) { + for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { + // we may have no response if item failed + if (bulkItemResponse.getResponse() != null) { + bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + docStatusStats.inc(bulkItemResponse.status()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } - @Override - public void onFailure(Exception e) { - // create failures for all relevant requests - for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - final DocWriteRequest docWriteRequest = request.request(); - final BulkItemResponse bulkItemResponse = new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } - docStatusStats.inc(bulkItemResponse.status()); - responses.set(request.id(), bulkItemResponse); - } + @Override + public void onFailure(Exception e) { + // create failures for all relevant requests + for (BulkItemRequest request : requests) { + final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + final DocWriteRequest docWriteRequest = request.request(); + final BulkItemResponse 
bulkItemResponse = new BulkItemResponse( + request.id(), + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) + ); + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); + } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } - private void finishHim() { - indicesService.addDocStatusStats(docStatusStats); - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - } - }, releasable::close)); + private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); + listener.onResponse( + new BulkResponse( + responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos) + ) + ); + } + }, releasable::close), span, tracer) + ); + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } } bulkRequest = null; // allow memory for bulk request items to be reclaimed before all items have been completed } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index fddda0ef1f9a7..268a6ed6f85b8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -99,6 +99,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportChannel; @@ -161,7 +162,8 @@ public TransportShardBulkAction( IndexingPressureService indexingPressureService, SegmentReplicationPressureService segmentReplicationPressureService, RemoteStorePressureService remoteStorePressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -177,7 +179,8 @@ public TransportShardBulkAction( EXECUTOR_NAME_FUNCTION, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index 9a9e861ad8055..d37e049d44720 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -39,6 +39,7 @@ import org.opensearch.action.DocWriteRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -64,8 +65,9 @@ * @see org.opensearch.client.Client#delete(DeleteRequest) * @see org.opensearch.client.Requests#deleteRequest(String) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest, diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java index 0436962ce01d2..66aa1c73042fe 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java @@ -36,13 +36,15 @@ import org.opensearch.action.support.replication.ReplicationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.VersionType; /** * A delete document action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRequestBuilder extends ReplicationRequestBuilder implements WriteRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java index e4d44197a8885..c39a787ad763d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.delete; import org.opensearch.action.DocWriteResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; @@ -48,8 +49,9 @@ * @see org.opensearch.action.delete.DeleteRequest * @see org.opensearch.client.Client#delete(DeleteRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteResponse extends DocWriteResponse { public DeleteResponse(ShardId shardId, StreamInput in) throws IOException { @@ -112,8 +114,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to * instantiate the {@link DeleteResponse}. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { @Override diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java index aad5a9853a3c5..dd9c01b04acbb 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -54,8 +55,9 @@ /** * Explain request encapsulating the explain query and document identifier to get an explanation for. 
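// Sketch, not part of this patch: scoring a single document with the explain request
// above (index, id, and query are placeholders); the ExplainResponse type appears just
// below and wraps a Lucene Explanation tree.
ExplainResponse explanation = client.explain(
    new ExplainRequest("my-index", "1").query(QueryBuilders.termQuery("user", "kimchy"))
).actionGet();
if (explanation.isMatch()) {
    System.out.println(explanation.getExplanation());
}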
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainRequest extends SingleShardRequest implements ToXContentObject { private static final ParseField QUERY_FIELD = new ParseField("query"); diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java index 3031cb6067469..681b48f7a6593 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -42,8 +43,9 @@ /** * A builder for {@link ExplainRequest}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder { ExplainRequestBuilder(OpenSearchClient client, ExplainAction action) { diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java b/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java index 895f87b83b6ff..80a8634e62d87 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Explanation; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -56,8 +57,9 @@ /** * Response containing the score explanation. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField _INDEX = new ParseField("_index"); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java index 9b01f65f0f711..20b9789972fff 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java @@ -32,6 +32,7 @@ package org.opensearch.action.fieldcaps; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -60,8 +61,9 @@ /** * Describes the capabilities of a field optionally merged across multiple indices. 
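The hunks in this stretch all apply one mechanical pattern: a type that carried the @opensearch.internal javadoc tag is re-tagged @opensearch.api and annotated with the release that first shipped it. A minimal illustration (ExampleResponse is a made-up name):

import org.opensearch.common.annotation.PublicApi;

/**
 * A hypothetical response type showing the promotion pattern used throughout this change.
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0")
public class ExampleResponse {
    // body unchanged; only the API-visibility markers are new
}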
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilities implements Writeable, ToXContentObject { private static final ParseField TYPE_FIELD = new ParseField("type"); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index c1c54c7018bcf..71e8db73e4ecc 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -52,7 +53,10 @@ /** * Transport request for retrieving field capabilities for an explicit list of fields + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FieldCapabilitiesRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NAME = "field_caps_request"; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java index 70a90b98bdf25..c589d344089f3 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; /** * Transport request builder for retrieving field capabilities * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilitiesRequestBuilder extends ActionRequestBuilder { public FieldCapabilitiesRequestBuilder(OpenSearchClient client, FieldCapabilitiesAction action, String... indices) { super(client, action, new FieldCapabilitiesRequest().indices(indices)); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index 2dcda2bf56b8f..72fdc75686e3b 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.fieldcaps; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -57,8 +58,10 @@ /** * Response for {@link FieldCapabilitiesRequest} requests. 
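With the field-caps types now public, a hedged caller sketch (index and field pattern are placeholders; assumes the async Client interface):

import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.opensearch.client.Client;
import org.opensearch.core.action.ActionListener;

// Hypothetical caller: list the capabilities reported for every field of one index.
void describeFields(Client client) {
    FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().indices("my-index").fields("*");
    client.fieldCaps(request, ActionListener.wrap(
        (FieldCapabilitiesResponse response) -> response.get()
            .forEach((field, caps) -> System.out.println(field + " -> " + caps.keySet())),
        Throwable::printStackTrace
    ));
}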
* - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentObject { private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField FIELDS_FIELD = new ParseField("fields"); diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 62d85d6d8f352..952fa8bdab63a 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -60,8 +61,9 @@ * @see org.opensearch.client.Requests#getRequest(String) * @see org.opensearch.client.Client#get(GetRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRequest extends SingleShardRequest implements RealtimeRequest { private String id; diff --git a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java index 6237cf73f0ca8..f50cbb16186f7 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.index.VersionType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -42,8 +43,9 @@ /** * A get document action request builder. 
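A short sketch of the promoted get surface (placeholder index and id; assumes the async Client interface):

import org.opensearch.action.get.GetRequest;
import org.opensearch.action.get.GetResponse;
import org.opensearch.client.Client;
import org.opensearch.core.action.ActionListener;

// Hypothetical caller: real-time get of a single document's source.
void fetchDoc(Client client) {
    GetRequest request = new GetRequest("my-index", "1").realtime(true);
    client.get(request, ActionListener.wrap(
        (GetResponse response) -> System.out.println(response.isExists() ? response.getSourceAsString() : "missing"),
        Throwable::printStackTrace
    ));
}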
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRequestBuilder extends SingleShardOperationRequestBuilder { public GetRequestBuilder(OpenSearchClient client, GetAction action) { diff --git a/server/src/main/java/org/opensearch/action/get/GetResponse.java b/server/src/main/java/org/opensearch/action/get/GetResponse.java index c86128444d7eb..f7f7241933bd6 100644 --- a/server/src/main/java/org/opensearch/action/get/GetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/GetResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.get; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.document.DocumentField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.ParsingException; @@ -58,8 +59,9 @@ * @see GetRequest * @see org.opensearch.client.Client#get(GetRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetResponse extends ActionResponse implements Iterable, ToXContentObject { GetResult getResult; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java index 19c9b785e7ea2..09b4205ffe521 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * A single multi get response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetItemResponse implements Writeable { private final GetResponse response; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 7f0844d717488..b15c69a41972f 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -42,6 +42,7 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -69,8 +70,9 @@ /** * Transport request for a multi get. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetRequest extends ActionRequest implements Iterable, @@ -91,8 +93,9 @@ public class MultiGetRequest extends ActionRequest /** * A single get item. 
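And for the multi-get types, one hedged sketch covering MultiGetRequest, its Item, and the per-item responses (placeholders throughout; assumes the async Client interface):

import org.opensearch.action.get.MultiGetItemResponse;
import org.opensearch.action.get.MultiGetRequest;
import org.opensearch.action.get.MultiGetResponse;
import org.opensearch.client.Client;
import org.opensearch.core.action.ActionListener;

// Hypothetical caller: fetch two documents in one round trip.
void fetchMany(Client client) {
    MultiGetRequest request = new MultiGetRequest()
        .add(new MultiGetRequest.Item("my-index", "1"))
        .add(new MultiGetRequest.Item("my-index", "2"));
    client.multiGet(request, ActionListener.wrap(
        (MultiGetResponse response) -> {
            for (MultiGetItemResponse item : response) {
                System.out.println(item.getId() + " failed=" + item.isFailed());
            }
        },
        Throwable::printStackTrace
    ));
}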
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Item implements Writeable, IndicesRequest, ToXContentObject { private String index; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java index c317edc07da8b..0b701c8ec11c7 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A multi get document action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetRequestBuilder extends ActionRequestBuilder { public MultiGetRequestBuilder(OpenSearchClient client, MultiGetAction action) { diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java index 73372b85be3b2..3c3489a065d9a 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ /** * Transport response for a multi get. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { private static final ParseField INDEX = new ParseField("_index"); @@ -71,8 +73,9 @@ public class MultiGetResponse extends ActionResponse implements Iterable implements DocWriteRequest, CompositeIndicesRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java index 4bf7634dcb7e1..9d4ad3c32778c 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.replication.ReplicationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * An index document action request builder. 
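The index surface follows the same shape; a minimal sketch (placeholder index, id, and source; assumes the async Client interface):

import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.index.IndexResponse;
import org.opensearch.client.Client;
import org.opensearch.core.action.ActionListener;

import java.util.Map;

// Hypothetical caller: index one document and print CREATED or UPDATED.
void indexDoc(Client client) {
    IndexRequest request = new IndexRequest("my-index").id("1").source(Map.of("title", "hello"));
    client.index(request, ActionListener.wrap(
        (IndexResponse response) -> System.out.println(response.getResult()),
        Throwable::printStackTrace
    ));
}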
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexRequestBuilder extends ReplicationRequestBuilder implements WriteRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/index/IndexResponse.java b/server/src/main/java/org/opensearch/action/index/IndexResponse.java index d0aa9b57b6528..53f832fc12c43 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/opensearch/action/index/IndexResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.index; import org.opensearch.action.DocWriteResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -50,8 +51,9 @@ * @see IndexRequest * @see org.opensearch.client.Client#index(IndexRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexResponse extends DocWriteResponse { public IndexResponse(ShardId shardId, StreamInput in) throws IOException { @@ -116,8 +118,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link Builder#build()} method is called to * instantiate the {@link IndexResponse}. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { @Override public IndexResponse build() { diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java index 2f05ce3a25320..b9d916e152c3d 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * transport request to delete a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeletePipelineRequest extends AcknowledgedRequest { private String id; diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java index 6a2eb494e8d3f..bc253db85bb0f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to delete a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeletePipelineRequestBuilder extends ActionRequestBuilder { public DeletePipelineRequestBuilder(OpenSearchClient client, DeletePipelineAction action) { diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java 
b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java index 4bae98098777d..c7266c31a5022 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * transport request to get a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetPipelineRequest extends ClusterManagerNodeReadRequest { private String[] ids; diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java index bdc13523ffdc6..593ea2156d5e3 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to get a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetPipelineRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetPipelineRequest, GetPipelineResponse, diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java index bd1b18be9a828..0719842bc985f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.ingest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -57,8 +58,9 @@ /** * transport response for getting a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetPipelineResponse extends ActionResponse implements StatusToXContentObject { private List pipelines; diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java index f764e4b23860a..06e89b5f2908b 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,9 @@ /** * transport request to put a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") 
public class PutPipelineRequest extends AcknowledgedRequest implements ToXContentObject { private String id; diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java index e734abb6d7969..e8d6a4d332319 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java @@ -35,14 +35,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; /** * Transport request builder to put a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutPipelineRequestBuilder extends ActionRequestBuilder { public PutPipelineRequestBuilder(OpenSearchClient client, PutPipelineAction action) { diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java index 98a03272aff42..bc338a57f762d 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java @@ -31,14 +31,16 @@ package org.opensearch.action.ingest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentObject; /** * Interface to simulate a document result * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SimulateDocumentResult extends Writeable, ToXContentObject { } diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java index ec3ee981b646f..b51f25d2e62b1 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; @@ -60,8 +61,9 @@ /** * transport request to simulate a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulatePipelineRequest extends ActionRequest implements ToXContentObject { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SimulatePipelineRequest.class); diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java index 55e6d95fde65c..2a5f281a5075c 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import 
org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; /** * Transport request builder to simulate a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulatePipelineRequestBuilder extends ActionRequestBuilder { /** diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java index 3cbbc4350c3bd..1fb3a69bd1e16 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -54,8 +55,9 @@ /** * transport response for simulating a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject { private String pipelineId; private boolean verbose; diff --git a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java index 032fe83e2220b..9d60706d1f100 100644 --- a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java @@ -54,6 +54,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportException; @@ -93,7 +94,8 @@ public TransportResyncReplicationAction( ShardStateAction shardStateAction, ActionFilters actionFilters, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -109,7 +111,8 @@ public TransportResyncReplicationAction( EXECUTOR_NAME_FUNCTION, true, /* we should never reject resync because of thread pool capacity on primary */ indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 1c0a1280ad550..f18bbb8a1cc13 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -65,10 +65,10 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -115,13 +115,12 @@ abstract class AbstractSearchAsyncAction exten private final int 
maxConcurrentRequestsPerNode; private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; + private final SearchRequestContext searchRequestContext; private SearchPhase currentPhase; private final List releasables = new ArrayList<>(); - private Optional searchRequestOperationsListener; - AbstractSearchAsyncAction( String name, Logger logger, @@ -140,7 +139,7 @@ abstract class AbstractSearchAsyncAction exten SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { super(name); final List toSkipIterators = new ArrayList<>(); @@ -176,7 +175,7 @@ abstract class AbstractSearchAsyncAction exten this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; - this.searchRequestOperationsListener = Optional.ofNullable(searchRequestOperationsListener); + this.searchRequestContext = searchRequestContext; } @Override @@ -215,6 +214,7 @@ public final void start() { 0, 0, buildTookInMillis(), + timeProvider.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null @@ -426,18 +426,26 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha clusterState.version() ); } - onPhaseEnd(); + onPhaseEnd(searchRequestContext); executePhase(nextPhase); } } - private void onPhaseEnd() { - this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseEnd(this); }); + private void onPhaseEnd(SearchRequestContext searchRequestContext) { + if (getCurrentPhase() != null) { + long tookInNanos = System.nanoTime() - getCurrentPhase().getStartTimeInNanos(); + searchRequestContext.updatePhaseTookMap(getCurrentPhase().getName(), TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + } + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); } private void onPhaseStart(SearchPhase phase) { setCurrentPhase(phase); - this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseStart(this); }); + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + } + + private void onRequestEnd(SearchRequestContext searchRequestContext) { + this.searchRequestContext.getSearchRequestOperationsListener().onRequestEnd(this, searchRequestContext); } private void executePhase(SearchPhase phase) { @@ -662,6 +670,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), + timeProvider.getPhaseTook(), failures, clusters, searchContextId @@ -694,15 +703,18 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At searchContextId = null; } } + searchRequestContext.setTotalHits(internalSearchResponse.hits().getTotalHits()); + searchRequestContext.setShardStats(results.getNumShards(), successfulOps.get(), skippedOps.get(), failures.length); + onPhaseEnd(searchRequestContext); + onRequestEnd(searchRequestContext); listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } - onPhaseEnd(); setCurrentPhase(null); } @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { - this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> searchRequestOperations.onPhaseFailure(this)); + 
this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index ae481736ad0aa..c693eea4a2c33 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -91,7 +91,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> phaseFactory, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -112,7 +112,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction scrollIds; diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java index 63f64e02a9dd2..c10417e756dd6 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * Transport request builder for clearing a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearScrollRequestBuilder extends ActionRequestBuilder { public ClearScrollRequestBuilder(OpenSearchClient client, ClearScrollAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java index 4428f693763ed..e2580ccdd0969 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -52,8 +53,9 @@ /** * Transport response for clearing a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearScrollResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField SUCCEEDED = new ParseField("succeeded"); diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java index a47127f18e18d..840d4becda714 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java @@ -13,6 +13,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -30,7 +31,10 @@ /** * A request to create a point in time against one or more indices. + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class CreatePitRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent { // keep alive for pit reader context diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java index aa841a02f1d20..410b93afc3e65 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -26,7 +27,10 @@ /** * Create point in time response with point in time id and shard success / failures + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class CreatePitResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField ID = new ParseField("pit_id"); private static final ParseField CREATION_TIME = new ParseField("creation_time"); diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 40098c6670b65..c534b306b1404 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -23,7 +24,10 @@ /** * This class captures if deletion of pit is successful along with pit id + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class DeletePitInfo extends TransportResponse implements Writeable, ToXContent { /** * This will be true if PIT reader contexts are deleted and also if contexts are not found. diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 1a66311cd9a1b..e21a63eef9433 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -27,7 +28,10 @@ /** * Request to delete one or more PIT search contexts based on IDs.
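For the point-in-time types above, a hedged creation sketch (keep-alive, partial-creation flag, and index are placeholders; assumes the createPit method on the async Client interface):

import org.opensearch.action.search.CreatePitRequest;
import org.opensearch.action.search.CreatePitResponse;
import org.opensearch.client.Client;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.core.action.ActionListener;

// Hypothetical caller: open a PIT for ten minutes and print its id.
void openPit(Client client) {
    CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMinutes(10), true, "my-index");
    client.createPit(request, ActionListener.wrap(
        (CreatePitResponse response) -> System.out.println("pit id: " + response.getId()),
        Throwable::printStackTrace
    ));
}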
+ * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class DeletePitRequest extends ActionRequest implements ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index 00274f0e610a9..469d78ad73bd9 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -28,7 +29,10 @@ /** * Response class for delete pits flow which clears the point in time search contexts + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { private final List deletePitResults; diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java index 2c6ab6437fd2a..0cde81203063c 100644 --- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -19,8 +20,9 @@ /** * Request to delete a search pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class DeleteSearchPipelineRequest extends AcknowledgedRequest { private String id; diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java index 9bbc81c0d6a4c..91353240a8156 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java @@ -11,6 +11,7 @@ import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; @@ -22,7 +23,10 @@ /** * Inner node get all pits response + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class GetAllPitNodeResponse extends BaseNodeResponse implements ToXContentFragment { /** diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java index 948fe72eae817..336c8139561e9 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,7 +19,10 @@ /** * Request to get all active PIT IDs from all nodes of cluster + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class GetAllPitNodesRequest extends BaseNodesRequest { @Inject diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java index 9bb3ab6407696..8d858a00c409b 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -11,6 +11,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,7 +32,10 @@ /** * This class transforms active PIT objects from all nodes to unique PIT objects + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class GetAllPitNodesResponse extends BaseNodesResponse implements ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java index f573a37fa5dab..59e95aa87985a 100644 --- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,8 +21,9 @@ /** * Request to get search pipelines * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class GetSearchPipelineRequest extends ClusterManagerNodeReadRequest { private final String[] ids; diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java index 64dde763e9ff8..0379046c8275d 100644 --- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -32,8 +33,9 @@ /** * transport response for getting a search pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class GetSearchPipelineResponse extends ActionResponse implements StatusToXContentObject { private final List pipelines; diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java index 220b7247517b9..7e4ed186dd665 100644 --- a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java +++ 
b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -23,7 +24,10 @@ /** * This holds information about pit reader context such as pit id and creation time + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class ListPitInfo implements ToXContentFragment, Writeable { private final String pitId; private final long creationTime; diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index da8f8f144eaf2..5f46e0c298de4 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.IndicesOptions.WildcardStates; import org.opensearch.common.CheckedBiConsumer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.bytes.BytesReference; @@ -71,8 +72,9 @@ /** * A multi search API request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0; @@ -277,6 +279,8 @@ public static void readMultiLineFormat( } else if ("cancel_after_time_interval".equals(entry.getKey()) || "cancelAfterTimeInterval".equals(entry.getKey())) { searchRequest.setCancelAfterTimeInterval(nodeTimeValue(value, null)); + } else if ("phase_took".equals(entry.getKey())) { + searchRequest.setPhaseTook(nodeBooleanValue(value)); } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } @@ -374,6 +378,9 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild if (request.getCancelAfterTimeInterval() != null) { xContentBuilder.field("cancel_after_time_interval", request.getCancelAfterTimeInterval().getStringRep()); } + if (request.isPhaseTook() != null) { + xContentBuilder.field("phase_took", request.isPhaseTook()); + } xContentBuilder.endObject(); } diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java index f9bb90c69d925..3bf078e8ce793 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A request builder for multiple search requests. 
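The phase_took handling added to the multi-search parser above pairs with a request-level setter introduced by this change; a trivial sketch:

import org.opensearch.action.search.SearchRequest;

// Opt a request in to per-phase took times; the flag round-trips through
// the msearch metadata line as "phase_took".
void phaseTookExample() {
    SearchRequest request = new SearchRequest("my-index"); // placeholder index
    request.setPhaseTook(true);
}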
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiSearchRequestBuilder extends ActionRequestBuilder { public MultiSearchRequestBuilder(OpenSearchClient client, MultiSearchAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java index 24ce4c2406570..70bb0b99e69df 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java @@ -35,6 +35,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; @@ -59,8 +60,9 @@ /** * A multi search response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiSearchResponse extends ActionResponse implements Iterable, ToXContentObject { private static final ParseField RESPONSES = new ParseField(Fields.RESPONSES); @@ -78,8 +80,9 @@ public class MultiSearchResponse extends ActionResponse implements Iterable implements ToXContentObject { private String id; private BytesReference source; diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index 59c0e54bb6cbc..7c702c16030b7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Id for a search context per node. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchContextIdForNode implements Writeable { private final String node; private final ShardSearchContextId searchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index eca9646ee6b7c..cad092f884f53 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -77,7 +77,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction final ClusterState clusterState, final SearchTask task, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { super( SearchPhaseName.DFS_PRE_QUERY.getName(), @@ -97,7 +97,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestOperationsListener + searchRequestContext ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 1c7b3c1f1563c..0890e9f5de8d4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Locale; @@ -40,8 +41,9 @@ /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. 
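Since SearchPhase is now public API, a sketch of the contract it exposes, assuming its String constructor stays accessible to subclasses (LoggingPhase is a made-up name):

import java.io.IOException;

// Hypothetical subclass of org.opensearch.action.search.SearchPhase:
// a phase is a named CheckedRunnable whose run() performs one step of the search.
final class LoggingPhase extends SearchPhase {
    LoggingPhase() {
        super("logging"); // phase name surfaced via getName()
    }

    @Override
    public void run() throws IOException {
        // phase body goes here
    }
}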
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchPhase implements CheckedRunnable { private final String name; private long startTimeInNanos; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 45d39a6f85ea2..df451e0745e3c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.search.SearchPhaseResult; @@ -50,6 +51,7 @@ * * @opensearch.internal */ +@InternalApi public interface SearchPhaseContext extends Executor { // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java index 4c0fe3ac06326..8cf92934c8a52 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java @@ -8,10 +8,14 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + /** * Enum for different Search Phases in OpenSearch - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "2.9.0") public enum SearchPhaseName { DFS_PRE_QUERY("dfs_pre_query"), QUERY("query"), diff --git a/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java b/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java index ffc64682cb07d..34e8aacbad250 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.action.search.SearchResponse.Clusters; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregations; @@ -53,6 +54,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchProgressListener { private static final Logger logger = LogManager.getLogger(SearchProgressListener.class); diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java new file mode 100644 index 0000000000000..8fe1be610f9af --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryShapeVisitor; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.List; +import java.util.ListIterator; + +/** + * Class to categorize the search queries based on the type and increment the relevant counters. + * Class also logs the query shape. + */ +final class SearchQueryCategorizer { + + private static final Logger log = LogManager.getLogger(SearchQueryCategorizer.class); + + final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { + searchQueryCounters = new SearchQueryCounters(metricsRegistry); + } + + public void categorize(SearchSourceBuilder source) { + QueryBuilder topLevelQueryBuilder = source.query(); + + logQueryShape(topLevelQueryBuilder); + incrementQueryTypeCounters(topLevelQueryBuilder); + incrementQueryAggregationCounters(source.aggregations()); + incrementQuerySortCounters(source.sorts()); + } + + private void incrementQuerySortCounters(List> sorts) { + if (sorts != null && sorts.size() > 0) { + for (ListIterator> it = sorts.listIterator(); it.hasNext();) { + SortBuilder sortBuilder = it.next(); + String sortOrder = sortBuilder.order().toString(); + searchQueryCounters.sortCounter.add(1, Tags.create().addTag("sort_order", sortOrder)); + } + } + } + + private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { + if (aggregations != null) { + searchQueryCounters.aggCounter.add(1); + } + } + + private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryBuilderVisitor searchQueryVisitor = new SearchQueryCategorizingVisitor(searchQueryCounters); + topLevelQueryBuilder.visit(searchQueryVisitor); + } + + private void logQueryShape(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + topLevelQueryBuilder.visit(shapeVisitor); + log.trace("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java new file mode 100644 index 0000000000000..98f0169e69a5c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Class to visit the querybuilder tree and also track the level information. + * Increments the counters related to Search Query type. + */ +final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { + private static final String LEVEL_TAG = "level"; + private final int level; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizingVisitor(SearchQueryCounters searchQueryCounters) { + this(searchQueryCounters, 0); + } + + private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) { + this.searchQueryCounters = counters; + this.level = level; + } + + public void accept(QueryBuilder qb) { + if (qb instanceof BoolQueryBuilder) { + searchQueryCounters.boolCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof FunctionScoreQueryBuilder) { + searchQueryCounters.functionScoreCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchQueryBuilder) { + searchQueryCounters.matchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchPhraseQueryBuilder) { + searchQueryCounters.matchPhrasePrefixCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MultiMatchQueryBuilder) { + searchQueryCounters.multiMatchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof QueryStringQueryBuilder) { + searchQueryCounters.queryStringQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RangeQueryBuilder) { + searchQueryCounters.rangeCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RegexpQueryBuilder) { + searchQueryCounters.regexCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof TermQueryBuilder) { + searchQueryCounters.termCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof WildcardQueryBuilder) { + searchQueryCounters.wildcardCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else { + searchQueryCounters.otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } + } + + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return new SearchQueryCategorizingVisitor(searchQueryCounters, level + 1); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java new file mode 100644 index 0000000000000..7e0259af07701 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch 
Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +/** + * Class contains all the Counters related to search query types. + */ +final class SearchQueryCounters { + private static final String UNIT = "1"; + private final MetricsRegistry metricsRegistry; + + // Counters related to Query types + public final Counter aggCounter; + public final Counter boolCounter; + public final Counter functionScoreCounter; + public final Counter matchCounter; + public final Counter matchPhrasePrefixCounter; + public final Counter multiMatchCounter; + public final Counter otherQueryCounter; + public final Counter queryStringQueryCounter; + public final Counter rangeCounter; + public final Counter regexCounter; + + public final Counter sortCounter; + public final Counter skippedCounter; + public final Counter termCounter; + public final Counter totalCounter; + public final Counter wildcardCounter; + + public SearchQueryCounters(MetricsRegistry metricsRegistry) { + this.metricsRegistry = metricsRegistry; + this.aggCounter = metricsRegistry.createCounter( + "search.query.type.agg.count", + "Counter for the number of top level agg search queries", + UNIT + ); + this.boolCounter = metricsRegistry.createCounter( + "search.query.type.bool.count", + "Counter for the number of top level and nested bool search queries", + UNIT + ); + this.functionScoreCounter = metricsRegistry.createCounter( + "search.query.type.functionscore.count", + "Counter for the number of top level and nested function score search queries", + UNIT + ); + this.matchCounter = metricsRegistry.createCounter( + "search.query.type.match.count", + "Counter for the number of top level and nested match search queries", + UNIT + ); + this.matchPhrasePrefixCounter = metricsRegistry.createCounter( + "search.query.type.matchphrase.count", + "Counter for the number of top level and nested match phrase prefix search queries", + UNIT + ); + this.multiMatchCounter = metricsRegistry.createCounter( + "search.query.type.multimatch.count", + "Counter for the number of top level and nested multi match search queries", + UNIT + ); + this.otherQueryCounter = metricsRegistry.createCounter( + "search.query.type.other.count", + "Counter for the number of top level and nested search queries that do not match any other categories", + UNIT + ); + this.queryStringQueryCounter = metricsRegistry.createCounter( + "search.query.type.querystringquery.count", + "Counter for the number of top level and nested queryStringQuery search queries", + UNIT + ); + this.rangeCounter = metricsRegistry.createCounter( + "search.query.type.range.count", + "Counter for the number of top level and nested range search queries", + UNIT + ); + this.regexCounter = metricsRegistry.createCounter( + "search.query.type.regex.count", + "Counter for the number of top level and nested regex search queries", + UNIT + ); + this.skippedCounter = metricsRegistry.createCounter( + "search.query.type.skipped.count", + "Counter for the number of queries skipped due to error", + UNIT + ); + this.sortCounter = metricsRegistry.createCounter( + "search.query.type.sort.count", + "Counter for the number of top level sort search queries", + UNIT + ); + this.termCounter = metricsRegistry.createCounter( + "search.query.type.term.count", + "Counter for the number of top level and nested
term search queries", + UNIT + ); + this.totalCounter = metricsRegistry.createCounter( + "search.query.type.total.count", + "Counter for the number of top level and nested search queries", + UNIT + ); + this.wildcardCounter = metricsRegistry.createCounter( + "search.query.type.wildcard.count", + "Counter for the number of top level and nested wildcard search queries", + UNIT + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index ca5ad087d3089..c26bd5eef8c15 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -82,7 +82,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncActionnull. + */ + public void setPhaseTook(Boolean phaseTook) { + this.phaseTook = phaseTook; + } + /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold, or null if the threshold is unspecified. @@ -719,7 +744,8 @@ public boolean equals(Object o) { && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) - && Objects.equals(pipeline, that.pipeline); + && Objects.equals(pipeline, that.pipeline) + && Objects.equals(phaseTook, that.phaseTook); } @Override @@ -740,7 +766,8 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - cancelAfterTimeInterval + cancelAfterTimeInterval, + phaseTook ); } @@ -783,6 +810,8 @@ public String toString() { + cancelAfterTimeInterval + ", pipeline=" + pipeline + + ", phaseTook=" + + phaseTook + "}"; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index bc43b65e5d844..e949c5e0bea29 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilder; import org.opensearch.script.Script; @@ -58,8 +59,9 @@ /** * A search action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchRequestBuilder extends ActionRequestBuilder { public SearchRequestBuilder(OpenSearchClient client, SearchAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java new file mode 100644 index 0000000000000..674363600b251 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.lucene.search.TotalHits; +import org.opensearch.common.annotation.InternalApi; + +import java.util.EnumMap; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * This class holds request-level context for search queries at the coordinator node + * + * @opensearch.internal + */ +@InternalApi +class SearchRequestContext { + private final SearchRequestOperationsListener searchRequestOperationsListener; + private long absoluteStartNanos; + private final Map<String, Long> phaseTookMap; + private TotalHits totalHits; + private final EnumMap<ShardStatsFieldNames, Integer> shardStats; + + /** + * This constructor is for testing only + */ + SearchRequestContext() { + this(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger())); + } + + SearchRequestContext(SearchRequestOperationsListener searchRequestOperationsListener) { + this.searchRequestOperationsListener = searchRequestOperationsListener; + this.absoluteStartNanos = System.nanoTime(); + this.phaseTookMap = new HashMap<>(); + this.shardStats = new EnumMap<>(ShardStatsFieldNames.class); + } + + SearchRequestOperationsListener getSearchRequestOperationsListener() { + return searchRequestOperationsListener; + } + + void updatePhaseTookMap(String phaseName, Long tookTime) { + this.phaseTookMap.put(phaseName, tookTime); + } + + Map<String, Long> phaseTookMap() { + return phaseTookMap; + } + + /** + * Override absoluteStartNanos set in constructor. + * For testing only + */ + void setAbsoluteStartNanos(long absoluteStartNanos) { + this.absoluteStartNanos = absoluteStartNanos; + } + + /** + * Request start time in nanos + */ + long getAbsoluteStartNanos() { + return absoluteStartNanos; + } + + void setTotalHits(TotalHits totalHits) { + this.totalHits = totalHits; + } + + TotalHits totalHits() { + return totalHits; + } + + void setShardStats(int total, int successful, int skipped, int failed) { + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL, total); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL, successful); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED, skipped); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED, failed); + } + + String formattedShardStats() { + if (shardStats.isEmpty()) { + return ""; + } else { + return String.format( + Locale.ROOT, + "{%s:%s, %s:%s, %s:%s, %s:%s}", + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED) + ); + } + } +} + +enum ShardStatsFieldNames { + SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL("total"), + SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL("successful"), + SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED("skipped"), + SEARCH_REQUEST_SLOWLOG_SHARD_FAILED("failed"); + + private final String name; + + ShardStatsFieldNames(String name) { + this.name = name; + } + + @Override + public String toString() { + return this.name; + } +} diff
--git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 89d725b56bded..056cb474eaf32 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.InternalApi; import java.util.List; @@ -18,14 +19,19 @@ * * @opensearch.internal */ -public interface SearchRequestOperationsListener { +@InternalApi +interface SearchRequestOperationsListener { void onPhaseStart(SearchPhaseContext context); - void onPhaseEnd(SearchPhaseContext context); + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); void onPhaseFailure(SearchPhaseContext context); + default void onRequestStart(SearchRequestContext searchRequestContext) {} + + default void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + /** * Holder of Composite Listeners * @@ -53,10 +59,10 @@ public void onPhaseStart(SearchPhaseContext context) { } @Override - public void onPhaseEnd(SearchPhaseContext context) { + public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { - listener.onPhaseEnd(context); + listener.onPhaseEnd(context, searchRequestContext); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onPhaseEnd listener [{}] failed", listener), e); } @@ -73,5 +79,27 @@ public void onPhaseFailure(SearchPhaseContext context) { } } } + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestStart(searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestStart listener [{}] failed", listener), e); + } + } + } + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestEnd(context, searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestEnd listener [{}] failed", listener), e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java new file mode 100644 index 0000000000000..6a0d60ffd3984 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -0,0 +1,273 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
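The CompositeListener above fans each event out to every registered listener and isolates failures, so one throwing listener cannot prevent the others from being notified. A self-contained sketch of that pattern, with Consumer standing in for the real SearchRequestOperationsListener callbacks:

```java
import java.util.List;
import java.util.function.Consumer;

// Sketch only: warn-and-continue fan-out, as the composite does with its logger.
final class CompositeNotifier<T> {
    private final List<Consumer<T>> listeners;

    CompositeNotifier(List<Consumer<T>> listeners) {
        this.listeners = listeners;
    }

    void dispatch(T event) {
        for (Consumer<T> listener : listeners) {
            try {
                listener.accept(event);
            } catch (Exception e) {
                // Log and keep going; remaining listeners still run.
                System.err.println("listener failed: " + e);
            }
        }
    }
}
```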
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.search; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.OpenSearchLogMessage; +import org.opensearch.common.logging.SlowLogLevel; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.tasks.Task; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * The request-level search slow log implementation + * + * @opensearch.internal + */ +public final class SearchRequestSlowLog implements SearchRequestOperationsListener { + private static final Charset UTF_8 = StandardCharsets.UTF_8; + + private long warnThreshold; + private long infoThreshold; + private long debugThreshold; + private long traceThreshold; + private SlowLogLevel level; + + private final Logger logger; + + static final String CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX = "cluster.search.request.slowlog"; + + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.warn", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.info", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.debug", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.trace", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL = new Setting<>( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".level", + SlowLogLevel.TRACE.name(), + SlowLogLevel::parse, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + public SearchRequestSlowLog(ClusterService clusterService) { + this(clusterService, 
LogManager.getLogger(CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX)); // logger configured in log4j2.properties + } + + SearchRequestSlowLog(ClusterService clusterService, Logger logger) { + this.logger = logger; + Loggers.setLevel(this.logger, SlowLogLevel.TRACE.name()); + + this.warnThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING).nanos(); + this.infoThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING).nanos(); + this.debugThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING).nanos(); + this.traceThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING).nanos(); + this.level = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL); + + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING, this::setWarnThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING, this::setInfoThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING, this::setDebugThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING, this::setTraceThreshold); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL, this::setLevel); + } + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + long tookInNanos = System.nanoTime() - searchRequestContext.getAbsoluteStartNanos(); + + if (warnThreshold >= 0 && tookInNanos > warnThreshold && level.isLevelEnabledFor(SlowLogLevel.WARN)) { + logger.warn(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (infoThreshold >= 0 && tookInNanos > infoThreshold && level.isLevelEnabledFor(SlowLogLevel.INFO)) { + logger.info(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (debugThreshold >= 0 && tookInNanos > debugThreshold && level.isLevelEnabledFor(SlowLogLevel.DEBUG)) { + logger.debug(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (traceThreshold >= 0 && tookInNanos > traceThreshold && level.isLevelEnabledFor(SlowLogLevel.TRACE)) { + logger.trace(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } + } + + /** + * Search request slow log message + * + * @opensearch.internal + */ + static final class SearchRequestSlowLogMessage extends OpenSearchLogMessage { + + SearchRequestSlowLogMessage(SearchPhaseContext context, long tookInNanos, SearchRequestContext searchRequestContext) { + super(prepareMap(context, tookInNanos, searchRequestContext), message(context, tookInNanos, searchRequestContext)); + } + + private static Map prepareMap( + SearchPhaseContext context, + long tookInNanos, + SearchRequestContext searchRequestContext + ) { + final Map messageFields = new HashMap<>(); + 
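The onRequestEnd method above logs at the most severe level whose threshold the request exceeded, checking from WARN down; a negative threshold disables that level. A simplified sketch of the cascade, assuming only thresholds matter (the real code additionally gates each hit on the configured SlowLogLevel):

```java
// Sketch of the first-match severity cascade in SearchRequestSlowLog.onRequestEnd.
final class SlowLogCascade {
    enum Severity { WARN, INFO, DEBUG, TRACE, NONE }

    static Severity severityFor(long tookNanos, long warn, long info, long debug, long trace) {
        if (warn >= 0 && tookNanos > warn) return Severity.WARN;
        if (info >= 0 && tookNanos > info) return Severity.INFO;
        if (debug >= 0 && tookNanos > debug) return Severity.DEBUG;
        if (trace >= 0 && tookNanos > trace) return Severity.TRACE;
        return Severity.NONE; // below every active threshold: nothing is logged
    }
}
```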
messageFields.put("took", TimeValue.timeValueNanos(tookInNanos)); + messageFields.put("took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + messageFields.put("phase_took", searchRequestContext.phaseTookMap().toString()); + if (searchRequestContext.totalHits() != null) { + messageFields.put("total_hits", searchRequestContext.totalHits()); + } else { + messageFields.put("total_hits", "-1"); + } + messageFields.put("search_type", context.getRequest().searchType()); + messageFields.put("shards", searchRequestContext.formattedShardStats()); + + if (context.getRequest().source() != null) { + String source = escapeJson(context.getRequest().source().toString(FORMAT_PARAMS)); + messageFields.put("source", source); + } else { + messageFields.put("source", "{}"); + } + + messageFields.put("id", context.getTask().getHeader(Task.X_OPAQUE_ID)); + return messageFields; + } + + // Message will be used in plaintext logs + private static String message(SearchPhaseContext context, long tookInNanos, SearchRequestContext searchRequestContext) { + final StringBuilder sb = new StringBuilder(); + sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], "); + sb.append("took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); + sb.append("phase_took_millis[").append(searchRequestContext.phaseTookMap().toString()).append("], "); + if (searchRequestContext.totalHits() != null) { + sb.append("total_hits[").append(searchRequestContext.totalHits()).append("], "); + } else { + sb.append("total_hits[-1]"); + } + sb.append("search_type[").append(context.getRequest().searchType()).append("], "); + sb.append("shards[").append(searchRequestContext.formattedShardStats()).append("], "); + if (context.getRequest().source() != null) { + sb.append("source[").append(context.getRequest().source().toString(FORMAT_PARAMS)).append("], "); + } else { + sb.append("source[], "); + } + if (context.getTask().getHeader(Task.X_OPAQUE_ID) != null) { + sb.append("id[").append(context.getTask().getHeader(Task.X_OPAQUE_ID)).append("]"); + } else { + sb.append("id[]"); + } + return sb.toString(); + } + + private static String escapeJson(String text) { + byte[] sourceEscaped = JsonStringEncoder.getInstance().quoteAsUTF8(text); + return new String(sourceEscaped, UTF_8); + } + } + + void setWarnThreshold(TimeValue warnThreshold) { + this.warnThreshold = warnThreshold.nanos(); + } + + void setInfoThreshold(TimeValue infoThreshold) { + this.infoThreshold = infoThreshold.nanos(); + } + + void setDebugThreshold(TimeValue debugThreshold) { + this.debugThreshold = debugThreshold.nanos(); + } + + void setTraceThreshold(TimeValue traceThreshold) { + this.traceThreshold = traceThreshold.nanos(); + } + + void setLevel(SlowLogLevel level) { + this.level = level; + } + + protected long getWarnThreshold() { + return warnThreshold; + } + + protected long getInfoThreshold() { + return infoThreshold; + } + + protected long getDebugThreshold() { + return debugThreshold; + } + + protected long getTraceThreshold() { + return traceThreshold; + } + + SlowLogLevel getLevel() { + return level; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index ad299c11b987d..2813c41e043ee 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; @@ -19,8 +20,9 @@ /** * Request level search stats to track coordinator level node search latencies * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.11.0") public final class SearchRequestStats implements SearchRequestOperationsListener { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); @@ -49,7 +51,7 @@ public void onPhaseStart(SearchPhaseContext context) { } @Override - public void onPhaseEnd(SearchPhaseContext context) { + public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); phaseStats.current.dec(); phaseStats.total.inc(); diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index a546311a1f668..899c71e91e3ab 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -33,7 +33,9 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; +import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; @@ -63,7 +65,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Supplier; @@ -74,8 +78,9 @@ /** * A response of a search request. 
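SearchRequestStats pairs onPhaseStart with onPhaseEnd to maintain three numbers per phase: an in-flight gauge, a completion count, and accumulated latency. A simplified, self-contained sketch of that holder; the real StatsHolder uses CounterMetric and MeanMetric rather than raw atomics:

```java
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the per-phase stats pattern behind SearchRequestStats.
final class PhaseStatsSketch {
    final AtomicLong current = new AtomicLong();   // phases currently running
    final AtomicLong total = new AtomicLong();     // phases completed
    final AtomicLong tookNanos = new AtomicLong(); // summed phase latency

    void onPhaseStart() {
        current.incrementAndGet();
    }

    void onPhaseEnd(long phaseTookNanos) {
        current.decrementAndGet();
        total.incrementAndGet();
        tookNanos.addAndGet(phaseTookNanos);
    }
}
```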
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField SCROLL_ID = new ParseField("_scroll_id"); @@ -94,6 +99,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final ShardSearchFailure[] shardFailures; private final Clusters clusters; private final long tookInMillis; + private final PhaseTook phaseTook; public SearchResponse(StreamInput in) throws IOException { super(in); @@ -112,6 +118,11 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + phaseTook = in.readOptionalWriteable(PhaseTook::new); + } else { + phaseTook = null; + } skippedShards = in.readVInt(); pointInTimeId = in.readOptionalString(); } @@ -126,7 +137,32 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, null, shardFailures, clusters, null); + } + + public SearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + internalResponse, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + null, + shardFailures, + clusters, + pointInTimeId + ); } public SearchResponse( @@ -136,6 +172,7 @@ public SearchResponse( int successfulShards, int skippedShards, long tookInMillis, + PhaseTook phaseTook, ShardSearchFailure[] shardFailures, Clusters clusters, String pointInTimeId @@ -148,6 +185,7 @@ public SearchResponse( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.tookInMillis = tookInMillis; + this.phaseTook = phaseTook; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; assert scrollId == null || pointInTimeId == null : "SearchResponse can't have both scrollId [" @@ -210,6 +248,13 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } + /** + * How long the request took in each search phase. + */ + public PhaseTook getPhaseTook() { + return phaseTook; + } + /** * The total number of shards the search was executed on. 
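The new phaseTook field is read and written only when the stream version is at least 2.12.0, which is what keeps mixed-version clusters wire-compatible during a rolling upgrade. A self-contained sketch of that gate, with DataInput/OutputStream standing in for StreamInput/StreamOutput and an invented version constant; the presence-byte-plus-payload encoding shown matches the spirit, not the exact bytes, of writeOptionalWriteable:

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class VersionGatedField {
    static final int V_2_12_0 = 2_12_00; // illustrative version ordinal

    static void write(DataOutputStream out, int remoteVersion, Long phaseTookMillis) throws IOException {
        if (remoteVersion >= V_2_12_0) {
            out.writeBoolean(phaseTookMillis != null);
            if (phaseTookMillis != null) {
                out.writeLong(phaseTookMillis);
            }
        } // older receivers are sent nothing, exactly as they expect
    }

    static Long read(DataInputStream in, int remoteVersion) throws IOException {
        if (remoteVersion >= V_2_12_0 && in.readBoolean()) {
            return in.readLong();
        }
        return null; // older sender never wrote the field
    }
}
```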
*/ @@ -298,6 +343,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); } builder.field(TOOK.getPreferredName(), tookInMillis); + if (phaseTook != null) { + phaseTook.toXContent(builder, params); + } builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); @@ -337,6 +385,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE Boolean terminatedEarly = null; int numReducePhases = 1; long tookInMillis = -1; + PhaseTook phaseTook = null; int successfulShards = -1; int totalShards = -1; int skippedShards = 0; // 0 for BWC @@ -401,6 +450,24 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE parser.skipChildren(); } } + } else if (PhaseTook.PHASE_TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + Map<String, Long> phaseTookMap = new HashMap<>(); + + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + try { + SearchPhaseName.valueOf(currentFieldName.toUpperCase(Locale.ROOT)); + phaseTookMap.put(currentFieldName, parser.longValue()); + } catch (final IllegalArgumentException ex) { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + phaseTook = new PhaseTook(phaseTookMap); } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; @@ -472,6 +539,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE successfulShards, skippedShards, tookInMillis, + phaseTook, failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId @@ -491,6 +559,9 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(phaseTook); + } out.writeVInt(skippedShards); out.writeOptionalString(pointInTimeId); } @@ -504,8 +575,9 @@ public String toString() { * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful * and how many of them were skipped. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Clusters implements ToXContentFragment, Writeable { public static final Clusters EMPTY = new Clusters(0, 0, 0); @@ -604,6 +676,68 @@ public String toString() { } } + /** + * Holds info about how long, in milliseconds, each search phase took to complete, + * keyed by search phase name.
+ * + * @opensearch.api + */ + @PublicApi(since = "1.0.0") + public static class PhaseTook implements ToXContentFragment, Writeable { + static final ParseField PHASE_TOOK = new ParseField("phase_took"); + private final Map<String, Long> phaseTookMap; + + public PhaseTook(Map<String, Long> phaseTookMap) { + this.phaseTookMap = phaseTookMap; + } + + private PhaseTook(StreamInput in) throws IOException { + this(in.readMap(StreamInput::readString, StreamInput::readLong)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PHASE_TOOK.getPreferredName()); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + if (phaseTookMap.containsKey(searchPhaseName.getName())) { + builder.field(searchPhaseName.getName(), phaseTookMap.get(searchPhaseName.getName())); + } else { + builder.field(searchPhaseName.getName(), 0); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PhaseTook phaseTook = (PhaseTook) o; + + return phaseTook.phaseTookMap.equals(phaseTookMap); + } + + @Override + public int hashCode() { + return Objects.hash(phaseTookMap); + } + } + static SearchResponse empty(Supplier<Long> tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index f90e98106f93f..054bd578cc56c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -236,6 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { successfulShards, skippedShards, tookInMillis, + searchTimeProvider.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index 579a01f0dd932..bca2c8a52b691 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; @@ -58,8 +59,9 @@ * to parse aggregations into, which are not serializable. This is the common part that can be * shared between core and client.
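PhaseTook.toXContent above emits every known phase name and defaults missing phases to 0, so the phase_took object always has the same set of keys. A small sketch of that rule:

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch of the stable-shape rendering in PhaseTook.toXContent.
final class PhaseTookShape {
    static Map<String, Long> render(Map<String, Long> measured, List<String> allPhaseNames) {
        Map<String, Long> out = new LinkedHashMap<>();
        for (String phase : allPhaseNames) {
            out.put(phase, measured.getOrDefault(phase, 0L));
        }
        return out;
    }
}
```

For example, rendering {"query": 12} against the phases [dfs_pre_query, query, fetch] yields {"dfs_pre_query": 0, "query": 12, "fetch": 0}, which lets consumers rely on a fixed key set.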
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchResponseSections implements ToXContentFragment { public static final ParseField EXT_FIELD = new ParseField("ext"); diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java index cda00811e5500..044efdc36d04f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,8 +54,9 @@ /** * Transport request for a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchScrollRequest extends ActionRequest implements ToXContentObject { private String scrollId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java index 638c595216631..41b34bd9c6c9e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.Scroll; /** * A search scroll action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchScrollRequestBuilder extends ActionRequestBuilder { public SearchScrollRequestBuilder(OpenSearchClient client, SearchScrollAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java index fbd85a3fc0b8f..33d9bf70021a0 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.routing.PlainShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.Countable; import org.opensearch.common.util.PlainIterator; @@ -54,8 +55,9 @@ * the cluster alias. 
* @see OriginalIndices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchShardIterator implements Comparable, Countable { private final OriginalIndices originalIndices; diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index 54faaf363cb70..dfecf4f462c4d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.opensearch.common.MemoizedSupplier; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; @@ -46,8 +47,9 @@ * Task storing information about a currently running search shard request. * See {@link ShardSearchRequest}, {@link ShardFetchSearchRequest}, ... * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index 20370b7b17a07..d3c1043c50cce 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.CancellableTask; @@ -45,8 +46,9 @@ /** * Task storing information about a currently running {@link SearchRequest}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchTask extends CancellableTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchType.java b/server/src/main/java/org/opensearch/action/search/SearchType.java index cb86c0d6c1b4a..e549ec598380a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchType.java +++ b/server/src/main/java/org/opensearch/action/search/SearchType.java @@ -32,11 +32,14 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + /** * Search type represent the manner at which the search operation is executed. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SearchType { /** * Same as {@link #QUERY_THEN_FETCH}, except for an initial scatter phase which goes and computes the distributed diff --git a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java index 4d3f9b71a919c..ef490844db692 100644 --- a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java @@ -37,6 +37,7 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -56,8 +57,9 @@ /** * Represents a failure to search on a specific shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSearchFailure extends ShardOperationFailedException { private static final String REASON_FIELD = "reason"; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index cff1005beff27..62886f7e9d981 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -67,7 +67,6 @@ import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; @@ -88,6 +87,7 @@ import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -98,6 +98,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -136,6 +137,13 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_QUERY_METRICS_ENABLED_SETTING = Setting.boolSetting( + "search.query.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + // cluster level setting for timeout based search cancellation. 
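The new search.query.metrics.enabled setting is node-scoped and dynamic, so it can be flipped at runtime. A sketch of the consuming side, assuming the setting has been registered with ClusterSettings; QueryMetricsToggle is an invented wrapper, while the Setting definition and the addSettingsUpdateConsumer call mirror this diff:

```java
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.settings.Setting;

final class QueryMetricsToggle {
    static final Setting<Boolean> ENABLED = Setting.boolSetting(
        "search.query.metrics.enabled",
        false,
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );

    private volatile boolean enabled;

    QueryMetricsToggle(ClusterService clusterService) {
        this.enabled = clusterService.getClusterSettings().get(ENABLED);
        // Dynamic: flips on a cluster settings update, no node restart needed.
        clusterService.getClusterSettings().addSettingsUpdateConsumer(ENABLED, value -> this.enabled = value);
    }

    boolean isEnabled() {
        return enabled;
    }
}
```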
If search request level parameter is present then that will take // precedence over the cluster setting value public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; @@ -154,6 +162,14 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( + SEARCH_PHASE_TOOK_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -168,7 +184,14 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -202,6 +227,18 @@ public TransportSearchAction( this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); this.searchRequestStats = searchRequestStats; + this.searchRequestSlowLog = searchRequestSlowLog; + this.metricsRegistry = metricsRegistry; + this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + } + + private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { + this.searchQueryMetricsEnabled = searchQueryMetricsEnabled; + if ((this.searchQueryMetricsEnabled == true) && this.searchQueryCategorizer == null) { + this.searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } } private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { @@ -252,6 +289,8 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust } /** + * Listener to track request-level tookTime and phase tookTimes from the coordinator. + * * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving * "now" to an index name). Another clock is needed for measuring how long a search operation * took. These two uses are at odds with each other. There are many issues with using a real @@ -261,11 +300,12 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider { + static final class SearchTimeProvider implements SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; + private boolean phaseTook = false; /** * Instantiates a new search time provider. 
The absolute start time is the real clock time @@ -291,6 +331,43 @@ long getAbsoluteStartMillis() { long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } + + public void setPhaseTook(boolean phaseTook) { + this.phaseTook = phaseTook; + } + + SearchResponse.PhaseTook getPhaseTook() { + if (phaseTook) { + Map<String, Long> phaseTookMap = new HashMap<>(); + // Convert Map<SearchPhaseName, Long> to Map<String, Long> for SearchResponse() + for (SearchPhaseName searchPhaseName : phaseStatsMap.keySet()) { + phaseTookMap.put(searchPhaseName.getName(), phaseStatsMap.get(searchPhaseName)); + } + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + + Map<SearchPhaseName, Long> phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + phaseStatsMap.put( + context.getCurrentPhase().getSearchPhaseName(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) + ); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName); + } } @Override @@ -332,13 +409,6 @@ public void executeRequest( SinglePhaseSearchAction phaseSearchAction, ActionListener<SearchResponse> listener ) { - final List<SearchRequestOperationsListener> searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } executeRequest(task, searchRequest, new SearchAsyncActionProvider() { @Override public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( @@ -355,7 +425,8 @@ public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext ) { return new AbstractSearchAsyncAction<SearchPhaseResult>( actionName, @@ -375,7 +446,7 @@ public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), clusters, - searchRequestOperationsListener + searchRequestContext ) { @Override protected void executePhaseOnShard( @@ -419,20 +490,62 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); + + final List<SearchRequestOperationsListener> searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger) + ); + searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + + PipelinedRequest searchRequest; ActionListener<SearchResponse> listener; try { searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = ActionListener.wrap( - r -> originalListener.onResponse(searchRequest.transformResponse(r)), - originalListener::onFailure - ); + listener = searchRequest.transformResponseListener(originalListener); } catch (Exception e) { originalListener.onFailure(e); return; } - ActionListener<SearchSourceBuilder> rewriteListener = ActionListener.wrap(source -> { + ActionListener<SearchRequest> requestTransformListener = ActionListener.wrap(sr ->
{ + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(sr.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + + ActionListener rewriteListener = buildRewriteListener( + sr, + task, + timeProvider, + searchAsyncActionProvider, + listener, + searchRequestContext + ); + if (sr.source() == null) { + rewriteListener.onResponse(sr.source()); + } else { + Rewriteable.rewriteAndFetch( + sr.source(), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + }, listener::onFailure); + searchRequest.transformRequest(requestTransformListener); + } + + private ActionListener buildRewriteListener( + SearchRequest searchRequest, + Task task, + SearchTimeProvider timeProvider, + SearchAsyncActionProvider searchAsyncActionProvider, + ActionListener listener, + SearchRequestContext searchRequestContext + ) { + return ActionListener.wrap(source -> { if (source != searchRequest.source()) { // only set it if it changed - we don't allow null values to be set but it might be already null. this way we catch // situations when source is rewritten to null due to a bug @@ -462,7 +575,8 @@ private void executeRequest( clusterState, listener, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -483,7 +597,8 @@ private void executeRequest( clusterState, l, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ) ); } else { @@ -533,22 +648,14 @@ private void executeRequest( listener, new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ); }, listener::onFailure) ); } } }, listener::onFailure); - if (searchRequest.source() == null) { - rewriteListener.onResponse(searchRequest.source()); - } else { - Rewriteable.rewriteAndFetch( - searchRequest.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), - rewriteListener - ); - } } static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { @@ -622,6 +729,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), + timeProvider.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -811,7 +919,8 @@ private void executeLocalSearch( ClusterState clusterState, ActionListener listener, SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestContext searchRequestContext ) { executeSearch( (SearchTask) task, @@ -825,7 +934,8 @@ private void executeLocalSearch( listener, SearchResponse.Clusters.EMPTY, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ); } @@ -943,7 +1053,8 @@ private void executeSearch( ActionListener listener, SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestContext searchRequestContext ) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index 
@@ -622,6 +729,7 @@ public void onResponse(SearchResponse searchResponse) {
                     searchResponse.getSuccessfulShards(),
                     searchResponse.getSkippedShards(),
                     timeProvider.buildTookInMillis(),
+                    timeProvider.getPhaseTook(),
                     searchResponse.getShardFailures(),
                     new SearchResponse.Clusters(1, 1, 0),
                     searchResponse.pointInTimeId()
@@ -811,7 +919,8 @@ private void executeLocalSearch(
         ClusterState clusterState,
         ActionListener<SearchResponse> listener,
         SearchContextId searchContext,
-        SearchAsyncActionProvider searchAsyncActionProvider
+        SearchAsyncActionProvider searchAsyncActionProvider,
+        SearchRequestContext searchRequestContext
     ) {
         executeSearch(
             (SearchTask) task,
@@ -825,7 +934,8 @@ private void executeLocalSearch(
             listener,
             SearchResponse.Clusters.EMPTY,
             searchContext,
-            searchAsyncActionProvider
+            searchAsyncActionProvider,
+            searchRequestContext
         );
     }
 
@@ -943,7 +1053,8 @@ private void executeSearch(
         ActionListener<SearchResponse> listener,
         SearchResponse.Clusters clusters,
         @Nullable SearchContextId searchContext,
-        SearchAsyncActionProvider searchAsyncActionProvider
+        SearchAsyncActionProvider searchAsyncActionProvider,
+        SearchRequestContext searchRequestContext
     ) {
         clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
 
         // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
@@ -1044,7 +1155,8 @@ private void executeSearch(
             listener,
             preFilterSearchShards,
             threadPool,
-            clusters
+            clusters,
+            searchRequestContext
         ).start();
     }
 
@@ -1127,15 +1239,37 @@ AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction(
             ActionListener<SearchResponse> listener,
             boolean preFilter,
             ThreadPool threadPool,
-            SearchResponse.Clusters clusters
+            SearchResponse.Clusters clusters,
+            SearchRequestContext searchRequestContext
         );
     }
 
-    private List<SearchRequestOperationsListener> createSearchListenerList() {
+    private List<SearchRequestOperationsListener> createSearchListenerList(SearchRequest searchRequest, SearchTimeProvider timeProvider) {
         final List<SearchRequestOperationsListener> searchListenersList = new ArrayList<>();
+
         if (isRequestStatsEnabled) {
             searchListenersList.add(searchRequestStats);
         }
+
+        // phase_took is enabled with request param and/or cluster setting
+        Boolean phaseTookRequestParam = searchRequest.isPhaseTook();
+        if (phaseTookRequestParam == null) { // check cluster setting only when request param is undefined
+            if (clusterService.getClusterSettings().get(TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED)) {
+                timeProvider.setPhaseTook(true);
+                searchListenersList.add(timeProvider);
+            }
+        } else if (phaseTookRequestParam == true) {
+            timeProvider.setPhaseTook(true);
+            searchListenersList.add(timeProvider);
+        }
+
+        if (searchRequestSlowLog.getWarnThreshold() >= 0
+            || searchRequestSlowLog.getInfoThreshold() >= 0
+            || searchRequestSlowLog.getDebugThreshold() >= 0
+            || searchRequestSlowLog.getTraceThreshold() >= 0) {
+            searchListenersList.add(searchRequestSlowLog);
+        }
+
         return searchListenersList;
     }
 
@@ -1153,15 +1287,9 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction(
         ActionListener<SearchResponse> listener,
         boolean preFilter,
         ThreadPool threadPool,
-        SearchResponse.Clusters clusters
+        SearchResponse.Clusters clusters,
+        SearchRequestContext searchRequestContext
     ) {
-        final List<SearchRequestOperationsListener> searchListenersList = createSearchListenerList();
-        final SearchRequestOperationsListener searchRequestOperationsListener;
-        if (!CollectionUtils.isEmpty(searchListenersList)) {
-            searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger);
-        } else {
-            searchRequestOperationsListener = null;
-        }
         if (preFilter) {
             return new CanMatchPreFilterSearchPhase(
                 logger,
@@ -1192,7 +1320,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction(
                     listener,
                     false,
                     threadPool,
-                    clusters
+                    clusters,
+                    searchRequestContext
                 );
                 return new SearchPhase(action.getName()) {
                     @Override
@@ -1202,7 +1331,7 @@ public void run() {
                     };
                 },
                 clusters,
-                searchRequestOperationsListener
+                searchRequestContext
             );
         } else {
             final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults(
@@ -1233,7 +1362,7 @@ public void run() {
                         clusterState,
                         task,
                         clusters,
-                        searchRequestOperationsListener
+                        searchRequestContext
                     );
                     break;
                 case QUERY_THEN_FETCH:
@@ -1254,7 +1383,7 @@ public void run() {
                         clusterState,
                         task,
                         clusters,
-                        searchRequestOperationsListener
+                        searchRequestContext
                     );
                     break;
                 default:
diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java
index 15275ba48fc6e..e91342a7ce4b8 100644
--- a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java
+++ b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java
@@ -36,6 +36,7 @@
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
+import
org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ * A class whose instances represent a value for counting the number * of active shard copies for a given shard in an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ActiveShardCount implements Writeable { private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2; diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java index d30ee7e11bdfa..2d9fecddb6f7d 100644 --- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java @@ -32,6 +32,7 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,15 +56,17 @@ * Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded * to actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesOptions implements ToXContentFragment { /** * The wildcard states. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum WildcardStates { OPEN, CLOSED, diff --git a/server/src/main/java/org/opensearch/action/support/RetryableAction.java b/server/src/main/java/org/opensearch/action/support/RetryableAction.java index fc2ee277b538a..d555a876f20fb 100644 --- a/server/src/main/java/org/opensearch/action/support/RetryableAction.java +++ b/server/src/main/java/org/opensearch/action/support/RetryableAction.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionRunnable; import org.opensearch.action.bulk.BackoffPolicy; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -52,8 +53,9 @@ * default. The action will be retried with exponentially increasing delay periods until the timeout period * has been reached. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RetryableAction { private final Logger logger; diff --git a/server/src/main/java/org/opensearch/action/support/WriteRequest.java b/server/src/main/java/org/opensearch/action/support/WriteRequest.java index f462464b99ce8..8d53f7b005d54 100644 --- a/server/src/main/java/org/opensearch/action/support/WriteRequest.java +++ b/server/src/main/java/org/opensearch/action/support/WriteRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.update.UpdateRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -78,8 +79,9 @@ default R setRefreshPolicy(String refreshPolicy) { /** * The refresh policy of the request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum RefreshPolicy implements Writeable { /** * Don't refresh after this request. The default. diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java index 5d09d880336f1..279ad401f7e56 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java @@ -31,6 +31,7 @@ package org.opensearch.action.support.master; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,9 @@ /** * A response that indicates that a request has been acknowledged * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AcknowledgedResponse extends ActionResponse implements ToXContentObject { private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index c52bc6c2c799d..bb6bfc6fd4773 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -33,6 +33,7 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.RetryableAction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.index.shard.ShardId; @@ -51,8 +52,9 @@ /** * Pending Replication Actions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingReplicationActions implements Consumer, Releasable { private final Map>> onGoingReplicationActions = ConcurrentCollections.newConcurrentMap(); diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java index f9b85cc4bd7aa..dd3a38c1398ab 100644 --- 
a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java @@ -8,11 +8,14 @@ package org.opensearch.action.support.replication; +import org.opensearch.common.annotation.PublicApi; + /** * The type of replication used for inter-node replication. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum ReplicationMode { /** * In this mode, a {@code TransportReplicationAction} is fanned out to underlying concerned shard and is replicated logically. diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java index 825405cb68549..67480976c500f 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java @@ -36,6 +36,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -57,8 +58,9 @@ /** * Base class for write action responses. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationResponse extends ActionResponse { public static final ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0]; @@ -88,8 +90,9 @@ public void setShardInfo(ShardInfo shardInfo) { /** * Holds shard information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardInfo implements Writeable, ToXContentObject { private static final String TOTAL = "total"; @@ -235,8 +238,9 @@ public String toString() { /** * Holds failure information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends ShardOperationFailedException implements ToXContentObject { private static final String _INDEX = "_index"; diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index b68bd13cfed80..ddebdc5530e70 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -134,6 +134,12 @@ public abstract class TransportReplicationAction< Setting.Property.NodeScope ); + /** + * Making primary and replica actions suffixes as constant + */ + public static final String PRIMARY_ACTION_SUFFIX = "[p]"; + public static final String REPLICA_ACTION_SUFFIX = "[r]"; + protected final ThreadPool threadPool; protected final TransportService transportService; protected final ClusterService clusterService; @@ -204,8 +210,8 @@ protected TransportReplicationAction( this.shardStateAction = shardStateAction; this.executor = executor; - this.transportPrimaryAction = actionName + "[p]"; - this.transportReplicaAction = actionName + "[r]"; + this.transportPrimaryAction = actionName + PRIMARY_ACTION_SUFFIX; + this.transportReplicaAction = actionName + REPLICA_ACTION_SUFFIX; 
this.initialRetryBackoffBound = REPLICATION_INITIAL_RETRY_BACKOFF_BOUND.get(settings); this.retryTimeout = REPLICATION_RETRY_TIMEOUT.get(settings); diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index a0b5299805868..9ebfa8cfd0df8 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -59,6 +59,11 @@ import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,6 +87,7 @@ public abstract class TransportWriteAction< protected final SystemIndices systemIndices; private final Function executorFunction; + private final Tracer tracer; protected TransportWriteAction( Settings settings, @@ -97,7 +103,8 @@ protected TransportWriteAction( Function executorFunction, boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class. @@ -119,6 +126,7 @@ protected TransportWriteAction( this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; this.systemIndices = systemIndices; + this.tracer = tracer; } protected String executor(IndexShard shard) { @@ -220,7 +228,12 @@ protected void shardOperationOnPrimary( threadPool.executor(executor).execute(new ActionRunnable>(listener) { @Override protected void doRun() { - dispatchedShardOperationOnPrimary(request, primary, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnPrimary", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnPrimary(request, primary, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -248,7 +261,12 @@ protected void shardOperationOnReplica(ReplicaRequest request, IndexShard replic threadPool.executor(executorFunction.apply(replica)).execute(new ActionRunnable(listener) { @Override protected void doRun() { - dispatchedShardOperationOnReplica(request, replica, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnReplica", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnReplica(request, replica, TraceableActionListener.create(listener, span, tracer)); + } } @Override diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java index 80ca1629417ad..8dbf3dd4df512 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java +++ 
b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.termvectors; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * A single multi term response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsItemResponse implements Writeable { private final TermVectorsResponse response; diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java index c055564c3fcbe..0eef737a54bb3 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java @@ -39,6 +39,7 @@ import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentParser; @@ -54,8 +55,9 @@ /** * A single multi get request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsRequest extends ActionRequest implements Iterable, diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java index 04dfd39112d6e..6bfa402575885 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A single multi get request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder { public MultiTermVectorsRequestBuilder(OpenSearchClient client, MultiTermVectorsAction action) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java index 5d40e64df1e3e..8a059829dda0e 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,15 +49,17 @@ /** * A multi get response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContentObject { /** * Represents a failure. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure implements Writeable { private final String index; private final String id; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java index 0e0202777794b..89d2f8567b3cb 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.TFIDFSimilarity; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.HashMap; @@ -49,8 +50,9 @@ * Filter the term vector (doc frequency, positions, offsets) for a * document. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsFilter { public static final int DEFAULT_MAX_QUERY_TERMS = 25; public static final int DEFAULT_MIN_TERM_FREQ = 0; @@ -179,8 +181,9 @@ public void setMaxWordLength(int maxWordLength) { /** * Internal score term * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ScoreTerm { public String field; public String word; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index 825b0b4982880..a761cabb9599a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -40,6 +40,7 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.support.single.shard.SingleShardRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentType; @@ -74,8 +75,9 @@ * Note, the {@link #index()}, and {@link #id(String)} are * required. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { private static final ParseField INDEX = new ParseField("_index"); private static final ParseField ID = new ParseField("_id"); @@ -118,8 +120,9 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Internal filter settings * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class FilterSettings { public Integer maxNumTerms; public Integer minTermFreq; @@ -572,8 +575,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * The flags. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Flag { // Do not change the order of these flags we use // the ordinal for encoding! Only append to the end! 
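The hunks in this stretch of the diff all make the same move: the `@opensearch.internal` javadoc tag becomes `@opensearch.api`, and the type is stamped with `@PublicApi(since = ...)` to record the version in which it entered the public contract. As a rough sketch of what such a marker annotation looks like (the real one lives in `org.opensearch.common.annotation`; the retention and target choices shown here are assumptions for illustration):

```java
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Hedged sketch of a public-API marker annotation along the lines of
// org.opensearch.common.annotation.PublicApi; details may differ in the repo.
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.ANNOTATION_TYPE })
@interface PublicApi {
    // First version in which the annotated type became part of the public contract.
    String since();
}
```

The practical effect is a machine-checkable stability promise: tooling can flag a plugin that depends on an unannotated (internal) type, while `since` documents how far back the contract holds.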
diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java index 02cfff1a6682b..ce68c06206fef 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; @@ -46,8 +47,9 @@ * Note, the {@code index}, {@code type} and {@code id} are * required. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsRequestBuilder extends ActionRequestBuilder { public TermVectorsRequestBuilder(OpenSearchClient client, TermVectorsAction action) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java index 3c338ce7338bb..7ad27808588ae 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java @@ -41,6 +41,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionResponse; @@ -62,8 +63,9 @@ * Response returning the term vector (doc frequency, positions, offsets) for a * document. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsResponse extends ActionResponse implements ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index 946c3c2446173..9654bd1c114ba 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -42,6 +42,7 @@ import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.support.single.instance.InstanceShardOperationRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentHelper; @@ -74,8 +75,9 @@ /** * Transport request for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateRequest extends InstanceShardOperationRequest implements DocWriteRequest, diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java index c97d0b4f5d13d..d662381ac5e19 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; @@ -49,8 +50,9 @@ /** * Transport request builder for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder implements WriteRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java index d9f6ceacb0f3b..c7ee1742af0f2 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.update; import org.opensearch.action.DocWriteResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; @@ -48,8 +49,9 @@ /** * Transport response for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateResponse extends DocWriteResponse { private static final String GET = "get"; @@ -173,8 +175,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to * instantiate the {@link UpdateResponse}. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { private GetResult getResult = null; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index fab104142e5bb..904083e96032f 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.Index; import org.opensearch.gateway.GatewayService; @@ -52,8 +53,9 @@ /** * An event received by the local node, signaling that the cluster state has changed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterChangedEvent { private final String source; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 5bd858a53bee2..4c38d6fd99f5d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -56,8 +57,9 @@ * InternalClusterInfoService.shardIdentifierFromRouting(String) * for the key used in the shardSizes map * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterInfo implements ToXContentFragment, Writeable { private final Map leastAvailableSpaceUsage; private final Map mostAvailableSpaceUsage; @@ -287,8 +289,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Represents the total amount of "reserved" space on a particular data path, together with the set of shards considered. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ReservedSpace implements Writeable { public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>()); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterName.java b/server/src/main/java/org/opensearch/cluster/ClusterName.java index 44321f7de9395..d6149421c3fd0 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterName.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterName.java @@ -32,6 +32,7 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; @@ -45,8 +46,9 @@ /** * Cluster Name * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterName implements Writeable { public static final Setting CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 1b87a60c2ccf5..9e63f961d241d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; @@ -97,8 +98,9 @@ * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterState implements ToXContentFragment, Diffable { public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); @@ -139,8 +141,9 @@ static boolean shouldSerializ /** * Custom cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable, ToXContentFragment, FeatureAware { /** @@ -596,8 +599,9 @@ public static Builder builder(ClusterState state) { /** * Builder for cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final ClusterName clusterName; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java index 140e6426bb801..d7702729ae884 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; /** * A component that is in charge of applying an incoming cluster state to the node internal data structures. * The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateApplier { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java index 01a8e51a3d13e..57fcfcad099ed 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java @@ -32,11 +32,14 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * A listener to be notified when a cluster state changes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java index 9a4b708548a7d..149d93a158007 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java @@ -33,13 +33,15 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Cluster state update task configuration for timeout and priority * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskConfig { /** * The timeout for this cluster state update task configuration. If diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index bf8494cc36857..115816798959e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.IdentityHashMap; import java.util.List; @@ -41,8 +42,9 @@ /** * Interface that updates the cluster state based on the task * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskExecutor { /** * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state @@ -103,8 +105,9 @@ default ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey * Represents the result of a batched execution of cluster state update tasks * @param the type of the cluster state update task * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ClusterTasksResult { @Nullable public final ClusterState resultingState; @@ -127,8 +130,9 @@ public static Builder builder() { /** * Builder for cluster state task. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map executionResults = new IdentityHashMap<>(); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index d6c4abfad7b8d..0cb24bd3f3eab 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -32,14 +32,16 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * Interface to implement a cluster state change listener * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 9225914a931b2..ae6626dd4785d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -34,6 +34,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import java.util.List; @@ -41,8 +42,9 @@ /** * A task that can update the cluster state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, diff --git a/server/src/main/java/org/opensearch/cluster/Diff.java b/server/src/main/java/org/opensearch/cluster/Diff.java index c0e8e7038d9b4..77301b7c04b66 100644 --- a/server/src/main/java/org/opensearch/cluster/Diff.java +++ b/server/src/main/java/org/opensearch/cluster/Diff.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; /** * Represents difference between states of cluster state parts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Diff extends Writeable { /** diff --git a/server/src/main/java/org/opensearch/cluster/DiskUsage.java b/server/src/main/java/org/opensearch/cluster/DiskUsage.java index c472522baee51..33ed030c58a02 100644 --- a/server/src/main/java/org/opensearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/opensearch/cluster/DiskUsage.java @@ -32,6 +32,7 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ /** * Encapsulation class used to represent the amount of disk used on a node. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiskUsage implements ToXContentFragment, Writeable { final String nodeId; final String nodeName; diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index c07dcc5daaee6..0b2ed7ef66d43 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -31,12 +31,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index 31c0b294b8004..2487aaf0d7c51 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -31,14 +31,17 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.DeprecatedApi; + /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). * - * @opensearch.internal + * @opensearch.api * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link LocalNodeClusterManagerListener} */ @Deprecated +@DeprecatedApi(since = "2.2.0") public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiff.java b/server/src/main/java/org/opensearch/cluster/NamedDiff.java index ce971aa723394..e994cfb224386 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiff.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiff.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; /** * Diff that also support NamedWriteable interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface NamedDiff> extends Diff, NamedWriteable { /** * The minimal version of the recipient this custom object can be sent to diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 5b7bb19a935b1..1c12c260b3929 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; @@ -81,8 +82,9 @@ * This component does not block on disconnections 
at all, because a disconnection might need to wait for an ongoing (background) connection * attempt to complete first. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeConnectionsService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index 3e0c78099e6b4..769f97373f7b7 100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -54,8 +55,9 @@ /** * Meta data about restore processes that are currently executing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreInProgress extends AbstractNamedDiffable implements Custom, Iterable { /** @@ -139,8 +141,9 @@ public RestoreInProgress build() { /** * Restore metadata * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Entry { private final String uuid; private final State state; @@ -238,8 +241,9 @@ public int hashCode() { /** * Represents status of a restored shard * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardRestoreStatus implements Writeable { private State state; private String nodeId; @@ -363,8 +367,9 @@ public int hashCode() { /** * Shard restore process state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { /** * Initializing state diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 952591f346536..3de23d2490c63 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -928,8 +929,9 @@ public String toString() { /** * State of the snapshots. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { INIT((byte) 0, false), STARTED((byte) 1, false), diff --git a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java index eb31fa2b7e69d..f0fa5af64d157 100644 --- a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * An exception to cluster state listener that allows for timeouts and for post added notifications. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TimeoutClusterStateListener extends ClusterStateListener { void postAdded(); diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java index db26496c6f263..837bf8af449ad 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java @@ -32,11 +32,14 @@ package org.opensearch.cluster.ack; +import org.opensearch.common.annotation.PublicApi; + /** * Base response returned after a cluster state update * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateUpdateResponse { private final boolean acknowledged; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java index eae79e2ac0986..b4d797a39dd2f 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java @@ -14,6 +14,7 @@ import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,7 +32,10 @@ /** * Cluster Awareness AttributeValue Health information + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessAttributeValueHealth implements Writeable, ToXContentFragment { private static final String ACTIVE_SHARDS = "active_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java index 01d9cfc4438e3..3de2260d0e8bd 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java @@ -11,6 +11,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -34,7 +35,9 @@ /** * Cluster Awareness health information * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessAttributesHealth implements Iterable, Writeable, ToXContentFragment { private final String awarenessAttributeName; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java index 1643db5a1f460..841764110626d 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java 
@@ -11,6 +11,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; @@ -30,7 +31,10 @@ /** * Cluster state Awareness health information + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessHealth implements Writeable, ToXContentFragment, Iterable { private static final String AWARENESS_ATTRIBUTE = "awareness_attributes"; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java index 40a5080ba74e7..5fa897c0b1185 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.block; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ /** * Blocks the cluster for concurrency * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlock implements Writeable, ToXContentFragment { private final int id; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java index 4673f075e8439..ea6c0c69c523d 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.block; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.rest.RestStatus; @@ -49,8 +50,9 @@ /** * Internal exception on obtaining a cluster block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlockException extends OpenSearchException { private final Set blocks; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java index 5ec847e100c86..5d3bf94aedb19 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java @@ -32,13 +32,16 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; + import java.util.EnumSet; /** * What level to block the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ClusterBlockLevel { READ, WRITE, diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index bcb4f7f71268e..304136166d515 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -37,6 
+37,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -59,8 +60,9 @@ /** * Represents current cluster level blocks to block dirty operations done against the cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), Map.of()); @@ -355,8 +357,9 @@ public static Builder builder() { /** * Builder for cluster blocks. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Set global = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java index 729d76c72e99e..3e9fa3e23a12f 100644 --- a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Internal exception on obtaining an index create block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexCreateBlockException extends ClusterBlockException { public IndexCreateBlockException(Set globalLevelBlocks) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 3a506397bcac8..0ef8262a216ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -34,14 +34,16 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; /** * Publishes the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish @@ -59,8 +61,9 @@ public interface ClusterStatePublisher { /** * An acknowledgement listener. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface AckListener { /** * Should be called when the cluster coordination layer has committed the cluster state (i.e. 
even if this publication fails, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java index 869c86ae75b90..53398d6f3f98f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.coordination; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ /** * Metadata for cluster coordination * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CoordinationMetadata implements Writeable, ToXContentFragment { public static final CoordinationMetadata EMPTY_METADATA = builder().build(); @@ -214,8 +216,9 @@ public String toString() { /** * Builder for coordination metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private long term = 0; private VotingConfiguration lastCommittedConfiguration = VotingConfiguration.EMPTY_CONFIG; @@ -266,8 +269,9 @@ public CoordinationMetadata build() { /** * Excluded nodes from voting config. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfigExclusion implements Writeable, ToXContentFragment { public static final String MISSING_VALUE_MARKER = "_absent_"; private final String nodeId; @@ -362,8 +366,9 @@ public String toString() { /** * A collection of persistent node ids, denoting the voting configuration for cluster state changes. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index a339852e6ed8d..987a3e3ffa7d3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -638,6 +638,12 @@ public interface PersistedState extends Closeable { */ void setLastAcceptedState(ClusterState clusterState); + /** + * Returns the stats for the persistence layer for {@link CoordinationState}. + * @return PersistedStateStats + */ + PersistedStateStats getStats(); + /** * Marks the last accepted cluster state as committed. 
* After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index eb30460ca1b7f..5a07f964f94a4 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -56,6 +56,7 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -260,9 +261,10 @@ public Coordinator( this::handlePublishRequest, this::handleApplyCommit ); - this.leaderChecker = new LeaderChecker(settings, transportService, this::onLeaderFailure, nodeHealthService); + this.leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, this::onLeaderFailure, nodeHealthService); this.followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, this::onFollowerCheckRequest, this::removeNode, @@ -865,7 +867,16 @@ protected void doStart() { @Override public DiscoveryStats stats() { - return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats()); + ClusterStateStats clusterStateStats = clusterManagerService.getClusterStateStats(); + ArrayList stats = new ArrayList<>(); + Stream.of(PersistedStateRegistry.PersistedStateType.values()).forEach(stateType -> { + if (persistedStateRegistry.getPersistedState(stateType) != null + && persistedStateRegistry.getPersistedState(stateType).getStats() != null) { + stats.add(persistedStateRegistry.getPersistedState(stateType).getStats()); + } + }); + clusterStateStats.setPersistenceStats(stats); + return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats(), clusterStateStats); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index f69a4f771cf21..70bb0515bb022 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -98,7 +99,9 @@ public class FollowersChecker { "cluster.fault_detection.follower_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the follower is considered to have failed. 
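The hunks above give CoordinationState.PersistedState a getStats() hook and teach Coordinator#stats() to fold any non-null per-layer stats from the PersistedStateRegistry into DiscoveryStats. A minimal sketch of a persistence layer feeding that hook; the class name is hypothetical, the other PersistedState methods are elided, and PersistedStateStats is the new class added later in this diff:

    import org.opensearch.cluster.ClusterState;
    import org.opensearch.cluster.coordination.PersistedStateStats;

    // Hypothetical sketch, not part of this patch: a persistence layer that
    // counts successes and failures and accumulates upload time, exposing the
    // counters through the new getStats() hook.
    class RemoteUploadPersistedState /* implements CoordinationState.PersistedState */ {
        private final PersistedStateStats stats = new PersistedStateStats("remote_upload");

        public void setLastAcceptedState(ClusterState clusterState) {
            long startMillis = System.currentTimeMillis();
            try {
                // ... write clusterState to the remote store ...
                stats.stateSucceeded();
                stats.stateTook(System.currentTimeMillis() - startMillis);
            } catch (RuntimeException e) {
                stats.stateFailed();
                throw e;
            }
        }

        public PersistedStateStats getStats() {
            return stats; // surfaced through Coordinator#stats() in DiscoveryStats
        }
    }

InMemoryPersistedState (below) simply returns null from getStats(), which the aggregation in Coordinator#stats() tolerates by skipping null entries.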
@@ -112,7 +115,7 @@ public class FollowersChecker { private final Settings settings; private final TimeValue followerCheckInterval; - private final TimeValue followerCheckTimeout; + private TimeValue followerCheckTimeout; private final int followerCheckRetryCount; private final BiConsumer onNodeFailure; private final Consumer handleRequestAndUpdateState; @@ -127,6 +130,7 @@ public class FollowersChecker { public FollowersChecker( Settings settings, + ClusterSettings clusterSettings, TransportService transportService, Consumer handleRequestAndUpdateState, BiConsumer onNodeFailure, @@ -141,7 +145,7 @@ public FollowersChecker( followerCheckInterval = FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); followerCheckRetryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); - + clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_TIMEOUT_SETTING, this::setFollowerCheckTimeout); updateFastResponseState(0, Mode.CANDIDATE); transportService.registerRequestHandler( FOLLOWER_CHECK_ACTION_NAME, @@ -159,6 +163,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setFollowerCheckTimeout(TimeValue followerCheckTimeout) { + this.followerCheckTimeout = followerCheckTimeout; + } + /** * Update the set of known nodes, starting to check any new ones and stopping checking any previously-known-but-now-unknown ones. */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java index 67ef82ee7b2e9..b77ede5471534 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java @@ -65,6 +65,11 @@ public void setLastAcceptedState(ClusterState clusterState) { this.acceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public long getCurrentTerm() { return currentTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index 69ba1f977f326..8d4373b865f62 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -97,7 +98,9 @@ public class LeaderChecker { "cluster.fault_detection.leader_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the leader is considered to have failed. 
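FollowersChecker above and LeaderChecker below get the same treatment: the check timeout becomes a dynamic setting with an explicit upper bound of 60s, the timeout field drops final, and the constructor registers a ClusterSettings update consumer so the value can change without a restart. A self-contained sketch of that pattern, assuming the Setting.timeSetting overload with explicit min and max bounds; the setting key and class name are invented for the demo:

    import java.util.Set;

    import org.opensearch.common.settings.ClusterSettings;
    import org.opensearch.common.settings.Setting;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.unit.TimeValue;

    public class DynamicTimeoutDemo {
        // Mirrors the shape of FOLLOWER_CHECK_TIMEOUT_SETTING after this change:
        // default 10s, minimum 1ms, maximum 60s, dynamically updatable.
        static final Setting<TimeValue> CHECK_TIMEOUT = Setting.timeSetting(
            "demo.check.timeout",
            TimeValue.timeValueMillis(10000),
            TimeValue.timeValueMillis(1),
            TimeValue.timeValueMillis(60000),
            Setting.Property.NodeScope,
            Setting.Property.Dynamic
        );

        private volatile TimeValue checkTimeout;

        DynamicTimeoutDemo(Settings settings, ClusterSettings clusterSettings) {
            this.checkTimeout = CHECK_TIMEOUT.get(settings);
            // Re-invoked whenever the cluster setting is updated at runtime,
            // like setFollowerCheckTimeout/setLeaderCheckTimeout in these hunks.
            clusterSettings.addSettingsUpdateConsumer(CHECK_TIMEOUT, t -> this.checkTimeout = t);
        }

        public static void main(String[] args) {
            ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.<Setting<?>>of(CHECK_TIMEOUT));
            DynamicTimeoutDemo demo = new DynamicTimeoutDemo(Settings.EMPTY, clusterSettings);
            clusterSettings.applySettings(Settings.builder().put("demo.check.timeout", "30s").build());
            System.out.println(demo.checkTimeout); // 30s, no node restart needed
        }
    }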
@@ -111,7 +114,7 @@ public class LeaderChecker {
 
     private final Settings settings;
 
     private final TimeValue leaderCheckInterval;
-    private final TimeValue leaderCheckTimeout;
+    private TimeValue leaderCheckTimeout;
     private final int leaderCheckRetryCount;
     private final TransportService transportService;
     private final Consumer<Exception> onLeaderFailure;
@@ -123,6 +126,7 @@ public class LeaderChecker {
 
     LeaderChecker(
         final Settings settings,
+        final ClusterSettings clusterSettings,
         final TransportService transportService,
         final Consumer<Exception> onLeaderFailure,
         NodeHealthService nodeHealthService
@@ -134,6 +138,7 @@ public class LeaderChecker {
         this.transportService = transportService;
         this.onLeaderFailure = onLeaderFailure;
         this.nodeHealthService = nodeHealthService;
+        clusterSettings.addSettingsUpdateConsumer(LEADER_CHECK_TIMEOUT_SETTING, this::setLeaderCheckTimeout);
 
         transportService.registerRequestHandler(
             LEADER_CHECK_ACTION_NAME,
@@ -155,6 +160,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) {
         });
     }
 
+    private void setLeaderCheckTimeout(TimeValue leaderCheckTimeout) {
+        this.leaderCheckTimeout = leaderCheckTimeout;
+    }
+
     public DiscoveryNode leader() {
         CheckScheduler checkScheduler = currentChecker.get();
         return checkScheduler == null ? null : checkScheduler.leader;
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java
new file mode 100644
index 0000000000000..0b7ed4fee5775
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.coordination;
+
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Persisted cluster state related stats.
+ *
+ * @opensearch.api
+ */
+@PublicApi(since = "2.12.0")
+public class PersistedStateStats implements Writeable, ToXContentObject {
+    private final String statsName;
+    private AtomicLong totalTimeInMillis = new AtomicLong(0);
+    private AtomicLong failedCount = new AtomicLong(0);
+    private AtomicLong successCount = new AtomicLong(0);
+    private Map<String, AtomicLong> extendedFields = new HashMap<>(); // keeping minimal extensibility
+
+    public PersistedStateStats(String statsName) {
+        this.statsName = statsName;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(statsName);
+        out.writeVLong(successCount.get());
+        out.writeVLong(failedCount.get());
+        out.writeVLong(totalTimeInMillis.get());
+        if (extendedFields.size() > 0) {
+            out.writeBoolean(true);
+            out.writeVInt(extendedFields.size());
+            for (Map.Entry<String, AtomicLong> extendedField : extendedFields.entrySet()) {
+                out.writeString(extendedField.getKey());
+                out.writeVLong(extendedField.getValue().get());
+            }
+        } else {
+            out.writeBoolean(false);
+        }
+    }
+
+    public PersistedStateStats(StreamInput in) throws IOException {
+        this.statsName = in.readString();
+        this.successCount = new AtomicLong(in.readVLong());
+        this.failedCount = new AtomicLong(in.readVLong());
+        this.totalTimeInMillis = new AtomicLong(in.readVLong());
+        if (in.readBoolean()) {
+            int extendedFieldsSize = in.readVInt();
+            this.extendedFields = new HashMap<>();
+            for (int fieldNumber = 0; fieldNumber < extendedFieldsSize; fieldNumber++) {
+                extendedFields.put(in.readString(), new AtomicLong(in.readVLong()));
+            }
+        }
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(statsName);
+        builder.field(Fields.SUCCESS_COUNT, getSuccessCount());
+        builder.field(Fields.FAILED_COUNT, getFailedCount());
+        builder.field(Fields.TOTAL_TIME_IN_MILLIS, getTotalTimeInMillis());
+        if (extendedFields.size() > 0) {
+            for (Map.Entry<String, AtomicLong> extendedField : extendedFields.entrySet()) {
+                builder.field(extendedField.getKey(), extendedField.getValue().get());
+            }
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    public void stateFailed() {
+        failedCount.incrementAndGet();
+    }
+
+    public void stateSucceeded() {
+        successCount.incrementAndGet();
+    }
+
+    /**
+     * Expects user to send time taken in milliseconds.
+     *
+     * @param timeTakenInUpload time taken in uploading the cluster state to remote
+     */
+    public void stateTook(long timeTakenInUpload) {
+        totalTimeInMillis.addAndGet(timeTakenInUpload);
+    }
+
+    public long getTotalTimeInMillis() {
+        return totalTimeInMillis.get();
+    }
+
+    public long getFailedCount() {
+        return failedCount.get();
+    }
+
+    public long getSuccessCount() {
+        return successCount.get();
+    }
+
+    protected void addToExtendedFields(String extendedField, AtomicLong extendedFieldValue) {
+        this.extendedFields.put(extendedField, extendedFieldValue);
+    }
+
+    public String getStatsName() {
+        return statsName;
+    }
+
+    /**
+     * Fields for parsing and toXContent
+     *
+     * @opensearch.internal
+     */
+    static final class Fields {
+        static final String SUCCESS_COUNT = "success_count";
+        static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis";
+        static final String FAILED_COUNT = "failed_count";
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
index 3f67870781580..914743299b023 100644
--- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
@@ -8,6 +8,7 @@
 
 package org.opensearch.cluster.decommission;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -18,8 +19,9 @@
 /**
  * {@link DecommissionAttribute} encapsulates information about decommissioned node attribute like attribute name, attribute value.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "2.4.0")
 public final class DecommissionAttribute implements Writeable {
     private final String attributeName;
     private final String attributeValue;
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java
index 3df807a4f94d3..254d70b0422d7 100644
--- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java
@@ -14,6 +14,7 @@
 import org.opensearch.cluster.NamedDiff;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.metadata.Metadata.Custom;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -30,8 +31,9 @@
 /**
  * Contains metadata about decommission attribute
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "2.4.0")
 public class DecommissionAttributeMetadata extends AbstractNamedDiffable<Custom> implements Custom {
 
     public static final String TYPE = "decommissionedAttribute";
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java
index 4ca8c3cc4286e..f4f6cbf632ae2 100644
--- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java
@@ -8,9 +8,14 @@
 
 package org.opensearch.cluster.decommission;
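One more note on PersistedStateStats before the decommission hunks continue: the class keeps a protected addToExtendedFields() hook (the "keeping minimal extensibility" comment) so subclasses can register extra counters that ride along in both writeTo and toXContent without touching the base class. An illustrative sketch; the subclass, stats name, and field name are invented for the example:

    import java.util.concurrent.atomic.AtomicLong;

    import org.opensearch.cluster.coordination.PersistedStateStats;

    // Illustrative only: a stats subclass registering one extra counter that is
    // then serialized and rendered alongside the three built-in counters.
    class RemoteStateStats extends PersistedStateStats {
        private final AtomicLong checksumFailedCount = new AtomicLong(0);

        RemoteStateStats() {
            super("remote_upload");
            addToExtendedFields("checksum_failed_count", checksumFailedCount);
        }

        void checksumFailed() {
            checksumFailedCount.incrementAndGet();
        }
    }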
+import org.opensearch.common.annotation.PublicApi; + /** * An enumeration of the states during decommissioning + * + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum DecommissionStatus { /** * Decommission process is initiated, and to-be-decommissioned leader is excluded from voting config diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java index 5ea482b8b8ffa..06f6b3d57385d 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.health; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * Cluster health status * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ClusterHealthStatus implements Writeable { GREEN((byte) 0), YELLOW((byte) 1), diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java index 0bb762e3ff744..19c64965e6941 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -61,8 +62,9 @@ /** * Cluster Index Health Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterIndexHealth implements Iterable, Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String NUMBER_OF_SHARDS = "number_of_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index cbc63565228f9..1fe88f65248c2 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -58,8 +59,9 @@ /** * Cluster shard health information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterShardHealth implements Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String ACTIVE_SHARDS = "active_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java index 31e37078d8b6c..8b3cc3c3cc2cc 
100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; @@ -61,8 +62,9 @@ /** * Metadata for index aliases * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AliasMetadata extends AbstractDiffable implements ToXContentFragment { private final String alias; @@ -271,8 +273,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Builder of alias metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final String alias; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java index cab1b75afe3e1..abc3712ee07e3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ * contains a field "foo", it's expected to contain all the necessary settings/mappings/etc for the * "foo" field. These component templates make up the individual pieces composing an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComponentTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField VERSION = new ParseField("version"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 5f1291f6c6d82..e7f1b97f28842 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.DataStream.TimestampField; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -63,8 +64,9 @@ * ids corresponding to component templates that should be composed in order when creating a new * index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComposableIndexTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); private static final ParseField TEMPLATE = new ParseField("template"); @@ -284,8 +286,9 @@ public String toString() { /** * Template for data stream. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField TIMESTAMP_FIELD_FIELD = new ParseField("timestamp_field"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java index 27803cb106005..8bb25ba8e8472 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java @@ -10,6 +10,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.admin.cluster.crypto.CryptoSettings; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -24,8 +25,9 @@ /** * Metadata about encryption and decryption * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public class CryptoMetadata implements Writeable { static final public String CRYPTO_METADATA_KEY = "crypto_metadata"; static final public String KEY_PROVIDER_NAME_KEY = "key_provider_name"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java index 753f872f88b22..54df245b1b835 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java @@ -37,6 +37,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -59,8 +60,9 @@ /** * Primary DataStream class * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class DataStream extends AbstractDiffable implements ToXContentObject { public static final String BACKING_INDEX_PREFIX = ".ds-"; @@ -258,8 +260,9 @@ public int hashCode() { /** * A timestamp field. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class TimestampField implements Writeable, ToXContentObject { static ParseField NAME_FIELD = new ParseField("name"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java index 8209c7bb56ad8..a8102182576ff 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.Diffable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ * This is a {@code Map} that implements AbstractDiffable so it * can be used for cluster state purposes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiffableStringMap extends AbstractMap implements Diffable { public static final DiffableStringMap EMPTY = new DiffableStringMap(Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java index 0c316373e484f..0f3ee894a7f63 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java @@ -33,6 +33,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.Strings; @@ -52,8 +53,9 @@ * An index abstraction has a unique name and encapsulates all the {@link IndexMetadata} instances it is pointing to. * Also depending on type it may refer to a single or many concrete indices and may or may not have a write index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexAbstraction { /** @@ -102,7 +104,10 @@ public interface IndexAbstraction { /** * An index abstraction type. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { /** @@ -335,8 +340,9 @@ private boolean isNonEmpty(List idxMetas) { /** * A data stream. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class DataStream implements IndexAbstraction { private final org.opensearch.cluster.metadata.DataStream dataStream; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index 85a203e5e059a..b4f8b6b188531 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.Diff; import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; @@ -68,8 +69,9 @@ * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexGraveyard implements Metadata.Custom { /** @@ -191,8 +193,9 @@ public static IndexGraveyard.Builder builder(final IndexGraveyard graveyard) { /** * A class to build an IndexGraveyard. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Builder { private List tombstones; private int numPurged = -1; @@ -367,8 +370,9 @@ public String getWriteableName() { /** * An individual tombstone entry for representing a deleted index. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Tombstone implements ToXContentObject, Writeable { private static final String INDEX_KEY = "index"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 5d8b751b241e2..03784df509ed6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.allocation.IndexMetadataUpdater; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Setting; @@ -96,8 +97,9 @@ /** * Index metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexMetadata implements Diffable, ToXContentFragment { public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock( @@ -159,8 +161,9 @@ public class IndexMetadata implements Diffable, ToXContentFragmen /** * The state of the index. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { OPEN((byte) 0), CLOSE((byte) 1); @@ -450,8 +453,9 @@ public Iterator> settings() { /** * Blocks the API. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum APIBlock implements Writeable { READ_ONLY("read_only", INDEX_READ_ONLY_BLOCK), READ("read", INDEX_READ_BLOCK), @@ -1209,8 +1213,9 @@ public static Builder builder(IndexMetadata indexMetadata) { /** * Builder of index metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String index; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index c5efb55316b84..3d532208bcfe2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -273,8 +273,9 @@ public String toString() { /** * Builder of index template metadata. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 2081a0035303b..e8180613c0fa3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; @@ -56,8 +57,9 @@ /** * Mapping configuration for a type. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MappingMetadata extends AbstractDiffable { public static final MappingMetadata EMPTY_MAPPINGS = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 626903877b0c6..1871ed24973c2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -98,8 +99,9 @@ /** * Metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Metadata implements Iterable, Diffable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -127,8 +129,9 @@ public boolean isSegmentReplicationEnabled(String indexName) { /** * Context of the XContent. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum XContentContext { /* Custom metadata should be returns as part of API call */ API, @@ -166,8 +169,9 @@ public enum XContentContext { /** * Custom metadata. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable, ToXContentFragment, ClusterState.FeatureAware { EnumSet context(); @@ -922,19 +926,26 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 if (!metadata1.coordinationMetadata.equals(metadata2.coordinationMetadata)) { return false; } - if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { + if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { return false; } - if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { + if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { return false; } - if (!metadata1.templates.equals(metadata2.templates())) { + if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { return false; } - if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { + return isGlobalResourcesMetadataEquals(metadata1, metadata2); + } + + /** + * Compares Metadata entities persisted in Remote Store. + */ + public static boolean isGlobalResourcesMetadataEquals(Metadata metadata1, Metadata metadata2) { + if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { return false; } - if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { + if (!metadata1.templates.equals(metadata2.templates())) { return false; } // Check if any persistent metadata needs to be saved @@ -1122,8 +1133,9 @@ public static Builder builder(Metadata metadata) { /** * Builder of metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String clusterUUID; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index a5ef337c3b62a..e3689d046193c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -70,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable implemen * in {@link org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse}. 
*/ public static final String HIDE_GENERATIONS_PARAM = "hide_generations"; + public static final String HIDE_SYSTEM_REPOSITORY_SETTING = "hide_system_repository_setting"; private final List repositories; @@ -291,7 +292,7 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params); } Settings settings = repository.settings(); - if (SYSTEM_REPOSITORY_SETTING.get(settings)) { + if (SYSTEM_REPOSITORY_SETTING.get(settings) && params.paramAsBoolean(HIDE_SYSTEM_REPOSITORY_SETTING, false)) { settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey())); } builder.startObject("settings"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java index 580643b96e411..b9d2a3edf356f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Metadata about registered repository * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RepositoryMetadata implements Writeable { private final String name; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index 8e367c71ed166..bd110c6af8975 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentHelper; @@ -58,8 +59,9 @@ * it is entirely independent from an index. It's a building block forming part of a regular index * template and a {@link ComponentTemplate}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Template extends AbstractDiffable
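The RepositoriesMetadata hunk above inverts the previous behavior: the system_repository marker used to be filtered out of every settings rendering, and is now filtered only when a caller opts in through the new HIDE_SYSTEM_REPOSITORY_SETTING param. A sketch of the opt-in call, assuming the static toXContent overload shown in the hunk:

    import java.io.IOException;
    import java.util.Map;

    import org.opensearch.cluster.metadata.RepositoriesMetadata;
    import org.opensearch.cluster.metadata.RepositoryMetadata;
    import org.opensearch.core.xcontent.ToXContent;
    import org.opensearch.core.xcontent.XContentBuilder;

    class RepositoryRendering {
        // Renders one repository while hiding the system_repository setting,
        // reproducing what used to be the unconditional default.
        static void renderHidingSystemFlag(RepositoryMetadata repository, XContentBuilder builder) throws IOException {
            ToXContent.Params params = new ToXContent.MapParams(
                Map.of(RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true")
            );
            RepositoriesMetadata.toXContent(repository, builder, params);
        }
    }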