diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 6b86da2c91261..144a8b71fca39 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -23,4 +23,7 @@ BWC_VERSION: - "2.9.0" - "2.9.1" - "2.10.0" + - "2.10.1" - "2.11.0" + - "2.11.1" + - "2.12.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4fa118e8486f1..8076adcf00ca9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @reta @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @peternied @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami @msfroh +* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 69616e533d1ed..908a032bf833e 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,8 +17,10 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] All tests pass - [ ] New functionality has been documented. - [ ] New functionality has javadoc added +- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) +- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml new file mode 100644 index 0000000000000..6a66ac5fb5609 --- /dev/null +++ b/.github/workflows/assemble.yml @@ -0,0 +1,26 @@ +name: Gradle Assemble +on: [pull_request] + +jobs: + assemble: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v4 + - name: Set up JDK 11 + uses: actions/setup-java@v3 + with: + java-version: 11 + distribution: temurin + - name: Setup docker (missing on MacOS) + if: runner.os == 'macos' + run: | + brew install docker + colima start + sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock + - name: Run Gradle (assemble) + run: | + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index c2a2cedaaefb4..76981276fe085 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -38,7 +38,7 @@ jobs: - name: Set hash working-directory: ./lucene run: | - echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" + echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT id: version - name: Initialize gradle settings diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index f4622859916c7..b04f404b11c55 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -1,4 +1,4 @@ -name: Gradle Precommit and Assemble +name: Gradle Precommit on: [pull_request] jobs: @@ -19,12 +19,3 @@ jobs: - name: Run Gradle (precommit) run: | ./gradlew javadoc precommit --parallel - - name: Setup docker (missing on MacOS) - if: runner.os == 'macos' - run: | - brew install docker - colima start - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - - name: Run Gradle (assemble) - run: | - ./gradlew assemble --parallel diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml new file mode 100644 index 0000000000000..11998e36c2dbb --- /dev/null +++ b/.github/workflows/pull-request-checks.yml @@ -0,0 +1,28 @@ +name: Pull Request Checks + +on: + pull_request: + types: + [ + opened, + edited, + review_requested, + synchronize, + reopened, + ready_for_review, + ] + +jobs: + verify-description-checklist: + name: Verify Description Checklist + runs-on: ubuntu-latest + steps: + - uses: peternied/check-pull-request-description-checklist@v1 + with: + checklist-items: | + New functionality includes testing. + All tests pass + New functionality has been documented. 
+ New functionality has javadoc added + Commits are signed per the DCO using --signoff + Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index df785bcc70014..a20c671c137b2 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -59,7 +59,7 @@ jobs: sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE }} @@ -86,7 +86,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE_X }} @@ -113,7 +113,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: main diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d1ca935193c3..b40878066960a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,9 +10,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) -- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. 
([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -32,7 +39,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 - Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-compress` from 1.22 to 1.23.0 - Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 - Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) @@ -45,6 +51,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -53,6 +60,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - Return 409 Conflict HTTP status instead of 
503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) + ### Deprecated @@ -81,52 +91,47 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) -- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) -- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) -- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) -- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) -- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) -- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) ### Dependencies -- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) -- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) -- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) -- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) -- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) -- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 
([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) -- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) -- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858)) ### Changed -- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) -- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) -- [Tracing Framework] Add support for SpanKind. 
([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) ### Deprecated ### Removed ### Fixed -- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) -- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) -- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) -- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) +- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d379d78829318..4a1162cf2558b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,6 +8,7 @@ - [Developer Certificate of Origin](#developer-certificate-of-origin) - [Changelog](#changelog) - [Review Process](#review-process) + - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) # Contributing to OpenSearch @@ -162,3 +163,14 @@ During the PR process, expect that there will be some back-and-forth. Please try If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and usually take care of backporting it to appropriate branches ourselves. If we reject the PR, we will close the pull request with a comment explaining why. This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. 
+ +## Troubleshooting Failing Builds + +The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does a lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this poses a challenge by making it harder to discover important issues/bugs in a straightforward way and may lead to so-called flaky tests - tests that flip randomly from success to failure without any code changes. + +If your pull request reports failing test(s) on one of the checks, please: + - look if there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question + - if not, please make sure this is not caused by your changes by running the failing test(s) locally for some time + - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with the `flaky-test` label + - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) + - as a bonus, try to contribute by fixing the flaky test(s) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index d7bdd09ea882e..6d3e0f018657e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -103,7 +103,7 @@ dependencies { api localGroovy() api 'commons-codec:commons-codec:1.16.0' - api 'org.apache.commons:commons-compress:1.23.0' + api 'org.apache.commons:commons-compress:1.24.0' api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0' @@ -114,7 +114,7 @@ dependencies { api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.5.1' + api 'de.thetaphi:forbiddenapis:3.6' api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.4' diff --git a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java index c5b4de157c75c..662510fbbf61c 100644 --- a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java @@ -45,17 +45,16 @@ /** * A standalone process that will reap external services after a build dies. - * *
<h2>Input</h2>
* Since how to reap a given service is platform and service dependent, this tool * operates on system commands to execute. It takes a single argument, a directory * that will contain files with reaping commands. Each line in each file will be * executed with {@link Runtime#exec(String)}. - * + *
<p>
* The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin. When Gradle shuts down, whether normally or abruptly, the * pipe will be broken and read will return. - * + *
<p>
* The reaper will then iterate over the files in the configured directory, * and execute the given commands. If any commands fail, a failure message is * written to stderr. Otherwise, the input file will be deleted. If no inputs diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java index cddd03ccc2019..4d45640b75e3d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java @@ -52,15 +52,15 @@ /** * A container for opensearch supported version information used in BWC testing. - * + *
<p>
* Parse the Java source file containing the versions declarations and use the known rules to figure out which are all * the version the current one is wire and index compatible with. * On top of this, figure out which of these are unreleased and provide the branch they can be built from. - * + *
<p>
* Note that in this context, currentVersion is the unreleased version this build operates on. * At any point in time there will surely be four such unreleased versions being worked on, * thus currentVersion will be one of these. - * + *
<p>
* Considering: *

*
M, M > 0
@@ -84,7 +84,7 @@ * Each build is only concerned with versions before it, as those are the ones that need to be tested * for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous * version. - * + *
<p>
* Each branch has a current version, and expected compatible versions are parsed from the server code's Version` class. * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased * version number to server in all branches when a version is released. diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java index 5ae7ad1595e2f..5259700b3a63d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java @@ -38,7 +38,7 @@ /** * Writes data passed to this stream as log messages. - * + *
<p>
* The stream will be flushed whenever a newline is detected. * Allows setting an optional prefix before each line of output. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java index 159270d28e3d6..c6e49dc44d6bd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java @@ -76,7 +76,7 @@ public InternalBwcGitPlugin(ProviderFactory providerFactory, ExecOperations exec public void apply(Project project) { this.project = project; this.gitExtension = project.getExtensions().create("bwcGitConfig", BwcGitExtension.class); - Provider remote = providerFactory.systemProperty("bwc.remote").forUseAtConfigurationTime().orElse("opensearch-project"); + Provider remote = providerFactory.systemProperty("bwc.remote").orElse("opensearch-project"); TaskContainer tasks = project.getTasks(); TaskProvider createCloneTaskProvider = tasks.register("createClone", LoggedExec.class, createClone -> { @@ -105,7 +105,6 @@ public void apply(Project project) { String remoteRepo = remote.get(); // for testing only we can override the base remote url String remoteRepoUrl = providerFactory.systemProperty("testRemoteRepo") - .forUseAtConfigurationTime() .getOrElse("https://github.com/" + remoteRepo + "/OpenSearch.git"); addRemote.setCommandLine(asList("git", "remote", "add", remoteRepo, remoteRepoUrl)); }); @@ -113,7 +112,6 @@ public void apply(Project project) { TaskProvider fetchLatestTaskProvider = tasks.register("fetchLatest", LoggedExec.class, fetchLatest -> { Provider gitFetchLatest = project.getProviders() .systemProperty("tests.bwc.git_fetch_latest") - .forUseAtConfigurationTime() .orElse("true") .map(fetchProp -> { if ("true".equals(fetchProp)) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java index aa81ef75701fa..db46d2e3edc55 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java @@ -40,7 +40,7 @@ /** * Represent rules for tests enforced by the @{link {@link TestingConventionsTasks}} - * + *
<p>
* Rules are identified by name, tests must have this name as a suffix and implement one of the base classes * and be part of all the specified tasks. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java index 1423b52c443d9..e82d8ed73ced2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -61,7 +61,7 @@ /** * A custom archive task that assembles a tar archive that preserves symbolic links. - * + *
<p>
* This task is necessary because the built-in task {@link org.gradle.api.tasks.bundling.Tar} does not preserve symbolic links. */ public class SymbolicLinkPreservingTar extends Tar { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java index 728e36ce98bff..fcadf35593ce6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java @@ -65,7 +65,7 @@ * Rest YAML tests :
* When the {@link RestResourcesPlugin} has been applied the {@link CopyRestTestsTask} will copy the Rest YAML tests if explicitly * configured with `includeCore` through the `restResources.restTests` extension. - * + *
<p>
* Additionally you can specify which sourceSetName resources should be copied to. The default is the yamlRestTest source set. * @see CopyRestApiTask * @see CopyRestTestsTask diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java index 2d71b9361963b..7abf9bf5fbef6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java @@ -53,7 +53,7 @@ /** * An helper to manage a vagrant box. - * + *
<p>
* This is created alongside a {@link VagrantExtension} for a project to manage starting and * stopping a single vagrant box. */ @@ -185,7 +185,7 @@ public void setArgs(String... args) { /** * A function to translate output from the vagrant command execution to the progress line. - * + *
<p>
* The function takes the current line of output from vagrant, and returns a new * progress line, or {@code null} if there is no update. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java index 85d3e340c50e7..ca1b95183505f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java @@ -47,7 +47,7 @@ /** * A shell script to run within a vagrant VM. - * + *
<p>
* The script is run as root within the VM. */ public abstract class VagrantShellTask extends DefaultTask { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index c0e7320ba7615..8e246ff9ecd11 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -58,7 +58,7 @@ public void tearDown() { * This test is used to verify that adding the 'opensearch.pluginzip' to the project * adds some other transitive plugins and tasks under the hood. This is basically * a behavioral test of the {@link Publish#apply(Project)} method. - * + *
<p>
* This is equivalent of having a build.gradle script with just the following section: *
<pre>
      *     plugins {
@@ -202,7 +202,7 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("useDefaultValues.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and ZIP_PUBLISH_TASK tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -277,7 +277,7 @@ public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -312,7 +312,7 @@ public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPull
         GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -348,7 +348,7 @@ public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -395,7 +395,7 @@ public void customizedGroupValue() throws IOException, URISyntaxException, XmlPu
         GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
index b64c719440733..def5248c1f255 100644
--- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
+++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
@@ -36,7 +36,7 @@
 
 /**
  * Filter out threads controlled by gradle that may be created during unit tests.
- *
+ * 
<p>
* Currently this includes pooled threads for Exec as well as file system event watcher threads. */ public class GradleThreadsFilter implements ThreadFilter { diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java index 163a903d31832..1a2e36aa78e9f 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java @@ -43,7 +43,7 @@ /** * Backwards compatible test* method provider (public, non-static). - * + *
<p>
* copy of org.apache.lucene.util.LuceneJUnit3MethodProvider to avoid a dependency between build and test fw. */ public final class JUnit3MethodProvider implements TestMethodProvider { diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index cb8050d1718c4..74c88e0961c9c 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -15,8 +15,9 @@ plugins { repositories { mavenCentral() } + dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.20.0" + implementation "org.apache.logging.log4j:log4j-core:2.21.0" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 71b3e267700b1..96d398c35851d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.8.0-snapshot-95cdd2e +lucene = 9.8.0 bundled_jdk_vendor = adoptium bundled_jdk = 20.0.2+9 @@ -14,9 +14,9 @@ jackson_databind = 2.15.2 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 -log4j = 2.20.0 +log4j = 2.21.0 slf4j = 1.7.36 -asm = 9.5 +asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 @@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.97.Final +netty = 4.1.100.Final joda = 2.12.2 # client dependencies @@ -68,4 +68,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.30.1 +opentelemetry = 1.31.0 +opentelemetrysemconv = 1.21.0-alpha diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java index e53e4f1ad692d..9cd12f5e78bd0 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java @@ -37,7 +37,7 @@ /** * Stores measurement samples. - * + *
<p>
* This class is NOT threadsafe. */ public final class SampleRecorder { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java index 5bd5a5d0e308e..eb0a8b0e8f40a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java @@ -170,8 +170,8 @@ public ClusterHealthResponse health(ClusterHealthRequest healthRequest, RequestO /** * Asynchronously get cluster health using the Cluster Health API. - * * If timeout occurred, {@link ClusterHealthResponse} will have isTimedOut() == true and status() == RestStatus.REQUEST_TIMEOUT + * * @param healthRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 61a202d25167f..35d9929a649ff 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -451,9 +451,9 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.withIndicesOptions(searchRequest.indicesOptions()); } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - /** - * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - - * refer to org.opensearch.action.search.SearchResponseMerger + /* + Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + refer to org.opensearch.action.search.SearchResponseMerger */ if (searchRequest.pointInTimeBuilder() != null) { params.putParam("ccs_minimize_roundtrips", "false"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index dad5b6a3679ec..d40445b2daa81 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -37,7 +37,7 @@ /** * A base request for any requests that supply timeouts. - * + *
<p>
* Please note, any requests that use a ackTimeout should set timeout as they * represent the same backing field on the server. */ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java index 7805a7853b003..62c5b54c0e75e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java @@ -156,7 +156,7 @@ public MediaType mappingsMediaType() { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -168,7 +168,7 @@ public CreateIndexRequest mapping(String source, MediaType mediaType) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -179,7 +179,7 @@ public CreateIndexRequest mapping(XContentBuilder source) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -196,7 +196,7 @@ public CreateIndexRequest mapping(Map source) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -282,7 +282,7 @@ public CreateIndexRequest aliases(Collection aliases) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -291,7 +291,7 @@ public CreateIndexRequest source(String source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(XContentBuilder source) { @@ -300,7 +300,7 @@ public CreateIndexRequest source(XContentBuilder source) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { @@ -311,7 +311,7 @@ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ @SuppressWarnings("unchecked") diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java index 6d7e95d191ba6..a63393bd2341b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java @@ -105,7 +105,7 @@ public MediaType mediaType() { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(Map mappingSource) { @@ -120,7 +120,7 @@ public PutMappingRequest source(Map mappingSource) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(String mappingSource, MediaType mediaType) { @@ -131,7 +131,7 @@ public PutMappingRequest source(String mappingSource, MediaType mediaType) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(XContentBuilder builder) { @@ -142,7 +142,7 @@ public PutMappingRequest source(XContentBuilder builder) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(BytesReference source, MediaType mediaType) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java index c419884700587..9129de717459f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java @@ -38,7 +38,6 @@ /** * Client side counterpart of server side version. - * * {@link org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup} */ public class TaskGroup { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java index 51ac62830446f..75badc4e3dbf2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java @@ -54,6 +54,7 @@ public class TaskInfo { private long runningTimeNanos; private boolean cancellable; private boolean cancelled; + private Long cancellationStartTime; private TaskId parentTaskId; private final Map status = new HashMap<>(); private final Map headers = new HashMap<>(); @@ -127,6 +128,14 @@ void setCancelled(boolean cancelled) { this.cancelled = cancelled; } + public Long getCancellationStartTime() { + return this.cancellationStartTime; + } + + public void setCancellationStartTime(Long cancellationStartTime) { + this.cancellationStartTime = cancellationStartTime; + } + public TaskId getParentTaskId() { return parentTaskId; } @@ -180,6 +189,7 @@ private void noOpParse(Object s) {} parser.declareString(TaskInfo::setParentTaskId, new ParseField("parent_task_id")); parser.declareObject(TaskInfo::setHeaders, (p, c) -> p.mapStrings(), new ParseField("headers")); parser.declareObject(TaskInfo::setResourceStats, (p, c) -> p.map(), new ParseField("resource_stats")); + parser.declareLong(TaskInfo::setCancellationStartTime, new ParseField("cancellation_time_millis")); PARSER = (XContentParser p, Void v, String name) -> parser.parse(p, new TaskInfo(new TaskId(name)), null); } @@ -199,7 +209,8 @@ && isCancelled() == taskInfo.isCancelled() && Objects.equals(getParentTaskId(), taskInfo.getParentTaskId()) && Objects.equals(status, taskInfo.status) && Objects.equals(getHeaders(), taskInfo.getHeaders()) - && Objects.equals(getResourceStats(), taskInfo.getResourceStats()); + && Objects.equals(getResourceStats(), taskInfo.getResourceStats()) + && Objects.equals(getCancellationStartTime(), taskInfo.cancellationStartTime); } @Override @@ -216,7 +227,8 @@ public int hashCode() { getParentTaskId(), status, getHeaders(), - getResourceStats() + getResourceStats(), + getCancellationStartTime() ); } @@ -250,6 +262,8 @@ public String toString() { + headers + ", resource_stats=" + resourceStats + + ", cancellationStartTime=" + + cancellationStartTime + '}'; } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java index 49bcb61b2dc3d..c464ee9ece74a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java @@ -44,7 +44,7 @@ /** * Base class for HLRC request 
parsing tests. - * + *
<p>
* This case class facilitates generating client side request test instances and * verifies that they are correctly parsed into server side request instances. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java index 27704b01560c4..7d2d6b87b85c6 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java @@ -44,7 +44,7 @@ /** * Base class for HLRC response parsing tests. - * + *
<p>
* This case class facilitates generating server side response test instances and * verifies that they are correctly parsed into HLRC response instances. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java index b7f6328b3c88e..3678cc042ba47 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java @@ -180,7 +180,7 @@ private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + *
<p>
* This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index abb2d75aea751..ce080b45273b4 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -137,15 +137,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java index d14759065b5eb..28909cf58541a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java @@ -65,15 +65,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IngestClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java index 50bcf79642eac..d0015db044843 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java @@ -90,15 +90,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java index 6916ae11556e2..2e2d15df5392a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java index 03e267aafd1b7..cbac0b8c97d9c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/{@link TasksClientDocumentationIT}.java[example] * -------------------------------------------------- - * + *

* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java index 1f747dc139d15..edb4d16c6d992 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java @@ -44,7 +44,7 @@ public class RandomCreateIndexGenerator { /** * Returns a random {@link CreateIndexRequest}. - * + *

* Randomizes the index name, the aliases, mappings and settings associated with the * index. When present, the mappings make no mention of types. */ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java index 835a93b5b09ce..faf5024d0c173 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java @@ -84,6 +84,10 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns for (int i = 0; i < 4; i++) { boolean cancellable = randomBoolean(); boolean cancelled = cancellable == true ? randomBoolean() : false; + Long cancellationStartTime = null; + if (cancelled) { + cancellationStartTime = randomNonNegativeLong(); + } tasks.add( new org.opensearch.tasks.TaskInfo( new TaskId(NODE_ID, (long) i), @@ -97,7 +101,8 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns cancelled, new TaskId("node1", randomLong()), Collections.singletonMap("x-header-of", "some-value"), - null + null, + cancellationStartTime ) ); } @@ -135,6 +140,7 @@ protected void assertInstances( assertEquals(ti.isCancelled(), taskInfo.isCancelled()); assertEquals(ti.getParentTaskId().getNodeId(), taskInfo.getParentTaskId().getNodeId()); assertEquals(ti.getParentTaskId().getId(), taskInfo.getParentTaskId().getId()); + assertEquals(ti.getCancellationStartTime(), taskInfo.getCancellationStartTime()); FakeTaskStatus status = (FakeTaskStatus) ti.getStatus(); assertEquals(status.code, taskInfo.getStatus().get("code")); assertEquals(status.status, taskInfo.getStatus().get("status")); diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index e819fa27a8939..7691c01daefea 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -310,7 +310,7 @@ public boolean isRunning() { * they will be retried). In case of failures all of the alive nodes (or * dead nodes that deserve a retry) are retried until one responds or none * of them does, in which case an {@link IOException} will be thrown. - * + *

* This method works by performing an asynchronous call and waiting * for the result. If the asynchronous call throws an exception we wrap * it and rethrow it so that the stack trace attached to the exception diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index b2807d35d230e..42c31864e0578 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -89,15 +89,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + *

* Where example is your tag name. - * + *

* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/RestClientDocumentation.java[example] * -------------------------------------------------- - * + *

* Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 440e9a2ea5cd1..8a4ca1fb0a136 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -49,15 +49,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + *

* Where example is your tag name. - * + *

* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnifferDocumentation.java[example] * -------------------------------------------------- - * + *
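All of the documentation IT classes touched above (SnapshotClient, StoredScripts, TasksClient, RestClientDocumentation, SnifferDocumentation) share the same snippet-tagging convention described in their headers. As a rough illustration, with a made-up class name and snippet body, a tagged region looks like this:

```java
public class SnippetTaggingExample {
    public void documentationSnippet() {
        // tag::example
        String snapshotName = "snapshot_1"; // everything between the markers is extracted verbatim
        // end::example
    }
}
```

The AsciiDoc side then pulls the region with `include-tagged::{doc-tests}/SnippetTaggingExample.java[example]`, and any line wider than 84 columns is cut off behind a horizontal scroll bar, as the comments above note.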

* Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 7914fcc172ef4..cb05661dc74a4 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.4.0" + id "com.netflix.nebula.ospackage-base" version "11.5.0" } void addProcessFilesTask(String type, boolean jdk) { @@ -213,7 +213,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { configurationFile '/etc/opensearch/jvm.options' configurationFile '/etc/opensearch/log4j2.properties' from("${packagingFiles}") { - dirMode 02750 + dirMode 0750 into('/etc') permissionGroup 'opensearch' includeEmptyDirs true @@ -223,7 +223,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } from("${packagingFiles}/etc/opensearch") { into('/etc/opensearch') - dirMode 02750 + dirMode 0750 fileMode 0660 permissionGroup 'opensearch' includeEmptyDirs true @@ -281,8 +281,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { dirMode mode } } - copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 02750) - copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 02750) + copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750) + copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 0750) copyEmptyDir('/usr/share/opensearch/plugins', 'root', 'root', 0755) into '/usr/share/opensearch' diff --git a/distribution/packages/src/deb/lintian/opensearch b/distribution/packages/src/deb/lintian/opensearch index 854b23131ecbc..e6db8e8c6b322 100644 --- a/distribution/packages/src/deb/lintian/opensearch +++ b/distribution/packages/src/deb/lintian/opensearch @@ -15,11 +15,11 @@ missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable non-standard-file-perm etc/default/opensearch 0660 != 0644 -non-standard-dir-perm etc/opensearch/ 2750 != 0755 -non-standard-dir-perm etc/opensearch/jvm.options.d/ 2750 != 0755 +non-standard-dir-perm etc/opensearch/ 0750 != 0755 +non-standard-dir-perm etc/opensearch/jvm.options.d/ 0750 != 0755 non-standard-file-perm etc/opensearch/* -non-standard-dir-perm var/lib/opensearch/ 2750 != 0755 -non-standard-dir-perm var/log/opensearch/ 2750 != 0755 +non-standard-dir-perm var/lib/opensearch/ 0750 != 0755 +non-standard-dir-perm var/log/opensearch/ 0750 != 0755 executable-is-not-world-readable etc/init.d/opensearch 0750 non-standard-file-permissions-for-etc-init.d-script etc/init.d/opensearch 0750 != 0755 diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 1d2cfe7eccae6..b7ab2e1c2309b 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -121,3 +121,9 @@ ${path.logs} # index searcher threadpool. # #opensearch.experimental.feature.concurrent_segment_search.enabled: false +# +# +# Gates the optimization of datetime formatters caching along with change in default datetime formatter +# Once there is no observed impact on performance, this feature flag can be removed. 
+# +#opensearch.experimental.optimization.datetime_formatter_caching.enabled: false diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index aa3dfbe39ee96..726c381db09f6 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -101,15 +101,15 @@ private static String maybeShowCodeDetailsInExceptionMessages() { } private static String javaLocaleProviders() { - /** - * SPI setting is used to allow loading custom CalendarDataProvider - * in jdk8 it has to be loaded from jre/lib/ext, - * in jdk9+ it is already within ES project and on a classpath - * - * Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date - * parsing will break in an incompatible way for some date patterns and locales. - * //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 - * See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider + /* + SPI setting is used to allow loading custom CalendarDataProvider + in jdk8 it has to be loaded from jre/lib/ext, + in jdk9+ it is already within ES project and on a classpath + + Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date + parsing will break in an incompatible way for some date patterns and locales. + //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 + See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ return "-Djava.locale.providers=SPI,COMPAT"; } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 2db3fef55d02e..b61a00aba04bc 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -38,7 +38,7 @@ dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.3" + api "org.bouncycastle:bc-fips:1.0.2.4" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 deleted file mode 100644 index c71320050b7de..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da62b32cb72591f5b4d322e6ab0ce7de3247b534 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 new file mode 100644 index 0000000000000..da37449f80d7e --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 @@ -0,0 +1 @@ +9008d04fc13da6455e6a792935b93b629757335d \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index 66f43b1e30d28..838d6e22a37bd 100644 --- 
a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -107,7 +107,7 @@ /** * A command for the plugin cli to install a plugin into opensearch. - * + *

* The install command takes a plugin id, which may be any of the following: *

    *
  • An official opensearch plugin name
@@ -411,7 +411,7 @@ private String getMavenUrl(Terminal terminal, String[] coordinates, String platf /** * Returns {@code true} if the given url exists, and {@code false} otherwise. - * + *

    * The given url must be {@code https} and existing means a {@code HEAD} request returns 200. */ // pkg private for tests to manipulate @@ -698,7 +698,6 @@ InputStream getPublicKey() { /** * Creates a URL and opens a connection. - * * If the URL returns a 404, {@code null} is returned, otherwise the open URL opject is returned. */ // pkg private for tests diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java index 579f676631a5a..02be3dbc82a44 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java @@ -41,7 +41,7 @@ * The listener is triggered whenever a full percent is increased * The listener is never triggered twice on the same percentage * The listener will always return 99 percent, if the expectedTotalSize is exceeded, until it is finished - * + *

    * Only used by the InstallPluginCommand, thus package private here */ abstract class ProgressInputStream extends FilterInputStream { diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java index b7dcbd50cf781..708f644bcdeb6 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java @@ -17,7 +17,7 @@ * An interface for an upgrade task, which in this instance is an unit of * operation that is part of the overall upgrade process. This extends the * {@link java.util.function.Consumer} interface. - * + *

    * The implementing tasks consume and instance of a tuple of {@link TaskInput} * and {@link Terminal} and operate via side effects. * diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java index e6122e7baf91a..e1ad55fe4b60b 100644 --- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java +++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java @@ -45,7 +45,7 @@ * It isn't recursive, just ignores exactly the elements you tell it. * Has option --missing-method to apply "method" level to selected packages (fix one at a time). * Matches package names exactly: so you'll need to list subpackages separately. - * + *

    * Note: This by default ignores javadoc validation on overridden methods. */ // Original version of this class is ported from MissingDoclet code in Lucene, diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f01f0a84a786a..adfb521550eb9 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=bb09982fdf52718e4c7b25023d10df6d35a5fff969860bdf5a5bd27a3ab27a9e +distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 diff --git a/gradlew b/gradlew index 0adc8e1a53214..1aa94a4269074 100755 --- a/gradlew +++ b/gradlew @@ -145,7 +145,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac @@ -153,7 +153,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then '' | soft) :;; #( *) # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -202,11 +202,11 @@ fi # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ diff --git a/libs/cli/src/main/java/org/opensearch/cli/Command.java b/libs/cli/src/main/java/org/opensearch/cli/Command.java index eed5c4ba4ee6f..cc9230bdb2282 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Command.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Command.java @@ -162,7 +162,7 @@ protected static void exit(int status) { /** * Executes this command. - * + *

    * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */ protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; diff --git a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java index be030c18507ad..fb1097178e5a3 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java @@ -44,7 +44,7 @@ /** * A Terminal wraps access to reading input and writing output for a cli. - * + *

    * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line * of text. Printing is also gated by the {@link Verbosity} of the terminal, diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java index c4ba778e7db86..fc5e364241d12 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java +++ b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java @@ -104,7 +104,7 @@ public static void checkJarHell(Consumer output) throws IOException, URI /** * Parses the classpath into an array of URLs - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ public static Set parseClassPath() { @@ -114,7 +114,7 @@ public static Set parseClassPath() { /** * Parses the classpath into a set of URLs. For testing. * @param classPath classpath to parse (typically the system property {@code java.class.path}) - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ @SuppressForbidden(reason = "resolves against CWD because that is how classpaths work") diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index 66e079c461e75..e060baf6f187e 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -38,7 +38,7 @@ * Holds a value that is either: * a) set implicitly e.g. through some default value * b) set explicitly e.g. from a user selection - * + *

    * When merging conflicting configuration settings such as * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only made implicitly by defaults. diff --git a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java index 7e89641927ed5..eb7b331c9aa24 100644 --- a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java +++ b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java @@ -514,7 +514,7 @@ public boolean anyMoveBackToPreviousDay() { * Builds an array that can be {@link Arrays#binarySearch(long[], long)}ed * for the daylight savings time transitions. * - * @openearch.internal + * @opensearch.internal */ private static class TransitionArrayLookup extends AbstractManyTransitionsLookup { private final LocalTimeOffset[] offsets; diff --git a/libs/common/src/main/java/org/opensearch/common/SetOnce.java b/libs/common/src/main/java/org/opensearch/common/SetOnce.java index a596b5fcdb61d..778926ce108b7 100644 --- a/libs/common/src/main/java/org/opensearch/common/SetOnce.java +++ b/libs/common/src/main/java/org/opensearch/common/SetOnce.java @@ -35,7 +35,7 @@ * A convenient class which offers a semi-immutable object wrapper implementation which allows one * to set the value of an object exactly once, and retrieve it many times. If {@link #set(Object)} * is called more than once, {@link AlreadySetException} is thrown and the operation will fail. - * + *

    * This is borrowed from lucene's experimental API. It is not reused to eliminate the dependency * on lucene core for such a simple (standalone) utility class that may change beyond OpenSearch needs. * diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index d0b94536b0729..5c0e3f2de7708 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -34,7 +34,6 @@ /** * Java 9 Tuple - * * todo: deprecate and remove w/ min jdk upgrade to 11? * * @opensearch.internal diff --git a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java index b3526859933ec..ed8d50892b74a 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java +++ b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java @@ -93,7 +93,7 @@ public static Path get(URI uri) { /** * Tries to resolve the given path against the list of available roots. - * + *
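The SetOnce javadoc above promises set-exactly-once semantics. A minimal sketch of that behaviour, assuming the class keeps the get()/set() surface and nested AlreadySetException of the Lucene original it was copied from:

```java
import org.opensearch.common.SetOnce;

public class SetOnceExample {
    public static void main(String[] args) {
        SetOnce<String> value = new SetOnce<>();
        value.set("first");                          // the one allowed assignment
        System.out.println(value.get());             // "first", readable any number of times
        try {
            value.set("second");                     // a second assignment must fail
        } catch (SetOnce.AlreadySetException e) {
            System.out.println("already set");
        }
    }
}
```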

    * If path starts with one of the listed roots, it returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, String path) { @@ -109,7 +109,7 @@ public static Path get(Path[] roots, String path) { /** * Tries to resolve the given file uri against the list of available roots. - * + *

    * If uri starts with one of the listed roots, it returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, URI uri) { diff --git a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java index a4fbc6cb65b0d..0f289c09bbae2 100644 --- a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java +++ b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java @@ -368,7 +368,7 @@ public static InetAddress forString(String ipString) { /** * Convert a byte array into an InetAddress. - * + *

    * {@link InetAddress#getByAddress} is documented as throwing a checked * exception "if IP address is of illegal length." We replace it with * an unchecked exception, for use by callers who already know that addr @@ -423,7 +423,7 @@ public static Tuple parseCidr(String maskedAddress) { /** * Given an address and prefix length, returns the string representation of the range in CIDR notation. - * + *

    * See {@link #toAddrString} for details on how the address is represented. */ public static String toCidrString(InetAddress address, int prefixLength) { diff --git a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java index a3fcffb1d6a4c..30ed5bf63a748 100644 --- a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java +++ b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java @@ -224,10 +224,10 @@ public double getDaysFrac() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + *

    * Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by method like {@link TimeValue#parse(String, String, String, String)}. - * + *

    * Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) @@ -239,12 +239,12 @@ public String toString() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + *

    * Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by method like {@link TimeValue#parse(String, String, String, String)}. The number of * fractional decimals (up to 10 maximum) are truncated to the number of fraction pieces * specified. - * + *

    * Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) diff --git a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java index 8762217916c7a..d6ea4fa359df3 100644 --- a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java +++ b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java @@ -25,9 +25,9 @@ /** * Bit mixing utilities from carrotsearch.hppc. - * + *
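To make the fractional-output caveat in the TimeValue javadoc above concrete, here is a small sketch assuming the usual timeValueSeconds factory; 96 seconds is exactly 1.6 minutes, matching the 1.6m example in the comment:

```java
import org.opensearch.common.unit.TimeValue;

public class TimeValueToStringExample {
    public static void main(String[] args) {
        TimeValue value = TimeValue.timeValueSeconds(96);
        // toString() picks a coarse unit and may emit a fraction...
        System.out.println(value);   // 1.6m
        // ...and a value such as "1.6m" is not accepted by TimeValue's parse methods,
        // so toString() output should never be round-tripped through parsing.
    }
}
```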

    * Licensed under ALv2. This is pulled in directly to avoid a full hppc dependency. - * + *

    * The purpose of these methods is to evenly distribute key space over int32 * range. */ @@ -111,7 +111,7 @@ public static int mix32(int k) { /** * Computes David Stafford variant 9 of 64bit mix function (MH3 finalization step, * with different shifts and constants). - * + *

    * Variant 9 is picked because it contains two 32-bit shifts which could be possibly * optimized into better machine code. * diff --git a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 b/libs/core/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.21.0.jar.sha1 b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 70baf1270cd5d..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Build.java b/libs/core/src/main/java/org/opensearch/Build.java index 67a50a8a31a0e..b5d67f5501725 100644 --- a/libs/core/src/main/java/org/opensearch/Build.java +++ b/libs/core/src/main/java/org/opensearch/Build.java @@ -216,7 +216,7 @@ public String getDistribution() { /** * Get the version as considered at build time - * + *

    * Offers a way to get the fully qualified version as configured by the build. * This will be the same as {@link Version} for production releases, but may include on of the qualifier ( e.x alpha1 ) * or -SNAPSHOT for others. diff --git a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java index 32eae654cf975..5d8e067a8fd8b 100644 --- a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java +++ b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java @@ -40,7 +40,7 @@ /** * The Contents of this file were originally moved from {@link Version}. - * + *

    * This class keeps all the supported OpenSearch predecessor versions for * backward compatibility purpose. * diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index 5bad711a15032..cce86b452f698 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -168,7 +168,7 @@ public OpenSearchException(Throwable cause) { /** * Construct a OpenSearchException with the specified detail message. - * + *

    * The message can be parameterized using {} as placeholders for the given * arguments * @@ -182,7 +182,7 @@ public OpenSearchException(String msg, Object... args) { /** * Construct a OpenSearchException with the specified detail message * and nested exception. - * + *

    * The message can be parameterized using {} as placeholders for the given * arguments * @@ -587,7 +587,7 @@ public static OpenSearchException innerFromXContent(XContentParser parser, boole * Static toXContent helper method that renders {@link OpenSearchException} or {@link Throwable} instances * as XContent, delegating the rendering to {@link OpenSearchException#toXContent(XContentBuilder, ToXContent.Params)} * or {@link #innerToXContent(XContentBuilder, ToXContent.Params, Throwable, String, String, Map, Map, Throwable)}. - * + *
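Since the constructor shown in the hunks above, OpenSearchException(String msg, Object... args), accepts {} placeholders, a short usage sketch may help (the index name is a made-up value):

```java
import org.opensearch.OpenSearchException;

public class ExceptionMessageExample {
    public static void main(String[] args) {
        String index = "logs-2023.10";   // hypothetical value
        // "{}" placeholders are filled from the trailing arguments
        OpenSearchException e = new OpenSearchException("failed to refresh index [{}]", index);
        System.out.println(e.getMessage());   // e.g. failed to refresh index [logs-2023.10]
    }
}
```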

    * This method is usually used when the {@link Throwable} is rendered as a part of another XContent object, and its result can * be parsed back using the {@code OpenSearchException.fromXContent(XContentParser)} method. */ @@ -606,7 +606,7 @@ public static void generateThrowableXContent(XContentBuilder builder, ToXContent * depends on the value of the "detailed" parameter: when it's false only a simple message based on the type and message of the * exception is rendered. When it's true all detail are provided including guesses root causes, cause and potentially stack * trace. - * + *

    * This method is usually used when the {@link Exception} is rendered as a full XContent object, and its output can be parsed * by the {@code #OpenSearchException.failureFromXContent(XContentParser)} method. */ diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 32f4ca0317907..8d9ee73a02c1d 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -94,7 +94,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_9_1 = new Version(2090199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java index 119e56cfe0bf2..4fd55898a2cb5 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java @@ -154,9 +154,9 @@ static ActionListener wrap(Runnable runnable) { /** * Creates a listener that wraps another listener, mapping response values via the given mapping function and passing along * exceptions to the delegate. - * + *
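The Version constants added in the hunk above appear to follow the existing id scheme: major, minor and revision packed into a single integer with a trailing 99 build marker (for example V_2_11_1 is 2110199). A quick arithmetic sketch of how such an id decomposes, independent of any OpenSearch API:

```java
public class VersionIdExample {
    public static void main(String[] args) {
        int id = 2110199;                              // V_2_11_1 from the hunk above
        System.out.println(id / 1_000_000);            // 2  -> major
        System.out.println(id % 1_000_000 / 10_000);   // 11 -> minor
        System.out.println(id % 10_000 / 100);         // 1  -> revision
        System.out.println(id % 100);                  // 99 -> build marker
    }
}
```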

    * Notice that it is considered a bug if the listener's onResponse or onFailure fails. onResponse failures will not call onFailure. - * + *

    * If the function fails, the listener's onFailure handler will be called. The principle is that the mapped listener will handle * exceptions from the mapping function {@code fn} but it is the responsibility of {@code delegate} to handle its own exceptions * inside `onResponse` and `onFailure`. @@ -334,7 +334,7 @@ protected void innerOnFailure(Exception e) { /** * Completes the given listener with the result from the provided supplier accordingly. * This method is mainly used to complete a listener with a block of synchronous code. - * + *

    * If the supplier fails, the listener's onFailure handler will be called. * It is the responsibility of {@code delegate} to handle its own exceptions inside `onResponse` and `onFailure`. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/Strings.java b/libs/core/src/main/java/org/opensearch/core/common/Strings.java index 6227716af9cc9..8fdec670bd9f2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/Strings.java +++ b/libs/core/src/main/java/org/opensearch/core/common/Strings.java @@ -38,7 +38,7 @@ /** * String utility class. - * + *

    * TODO replace Strings in :server * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java index 53915a3da824c..1a48abee2dbf8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java @@ -45,7 +45,7 @@ /** * A composite {@link BytesReference} that allows joining multiple bytes references * into one without copying. - * + *

    * Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java index a50d1c165ed72..30c84708728ef 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java @@ -17,7 +17,7 @@ * {@link StreamInput} version of Lucene's {@link org.apache.lucene.store.ByteArrayDataInput} * This is used as a replacement of Lucene ByteArrayDataInput for abstracting byte order changes * in Lucene's API - * + *

    * Attribution given to apache lucene project under ALv2: * * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java index abac76c8b6c27..123b52eb92876 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java @@ -43,7 +43,7 @@ /** * A registry for {@link Writeable.Reader} readers of {@link NamedWriteable}. - * + *

    * The registration is keyed by the combination of the category class of {@link NamedWriteable}, and a name unique * to that category. * diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index ece2012302919..3e996bdee83a2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -96,7 +96,7 @@ /** * A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing. - * + *

    * This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamOutput}. Finally @@ -1128,7 +1128,7 @@ public C readNamedWriteable(@SuppressWarnings("unused * the corresponding entry in the registry by name, so that the proper object can be read and returned. * Default implementation throws {@link UnsupportedOperationException} as StreamInput doesn't hold a registry. * Use {@link FilterInputStream} instead which wraps a stream and supports a {@link NamedWriteableRegistry} too. - * + *

    * Prefer {@link StreamInput#readNamedWriteable(Class)} and {@link StreamOutput#writeNamedWriteable(NamedWriteable)} unless you * have a compelling reason to use this method instead. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index 94b813246bc7e..2d69e1c686df3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -88,7 +88,7 @@ /** * A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing. - * + *

    * This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamInput}. Finally diff --git a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java index 322300a554284..45ee72f558724 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java +++ b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java @@ -50,7 +50,7 @@ public final class SecureString implements CharSequence, Closeable { /** * Constructs a new SecureString which controls the passed in char array. - * + *

    * Note: When this instance is closed, the array will be zeroed out. */ public SecureString(char[] chars) { @@ -59,7 +59,7 @@ public SecureString(char[] chars) { /** * Constructs a new SecureString from an existing String. - * + *

    * NOTE: This is not actually secure, since the provided String cannot be deallocated, but * this constructor allows for easy compatibility between new and old apis. * diff --git a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java index c15db75d06d49..49eadbbb2bc00 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java @@ -45,7 +45,7 @@ * A {@code SizeUnit} does not maintain size information, but only * helps organize and use size representations that may be maintained * separately across various contexts. - * + *
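A small sketch of the zero-on-close behaviour the SecureString javadoc above describes; the class and variable names here are made up, while the char[] constructor and CharSequence/Closeable surface come from the hunk. The String-based constructor, as the comment warns, cannot make the same guarantee:

```java
import org.opensearch.core.common.settings.SecureString;

public class SecureStringExample {
    public static void main(String[] args) {
        char[] password = "s3cr3t".toCharArray();
        try (SecureString secure = new SecureString(password)) {
            System.out.println(secure.length());   // usable as a CharSequence while open
        }
        // after close() the wrapped array has been zeroed out
        System.out.println((int) password[0]);      // 0
    }
}
```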

    * It use conventional data storage values (base-2) : *

      *
    • 1KB = 1024 bytes
diff --git a/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java index 27d5b5dfdfa15..5324ea6151e51 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java @@ -43,7 +43,7 @@ /** * Compressor interface used for compressing {@link org.opensearch.core.xcontent.MediaType} and * {@code org.opensearch.repositories.blobstore.BlobStoreRepository} implementations. - * + *

      * This is not to be confused with {@link org.apache.lucene.codecs.compressing.Compressor} which is used * for codec implementations such as {@code org.opensearch.index.codec.customcodecs.Lucene95CustomCodec} * for compressing {@link org.apache.lucene.document.StoredField}s diff --git a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java index 9290254c30d8d..af09a7aebba79 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java @@ -23,7 +23,7 @@ /** * A registry that wraps a static Map singleton which holds a mapping of unique String names (typically the * compressor header as a string) to registerd {@link Compressor} implementations. - * + *

      * This enables plugins, modules, extensions to register their own compression implementations through SPI * * @opensearch.experimental @@ -105,7 +105,7 @@ public static Compressor getCompressor(final String name) { /** * Returns the registered compressors as an Immutable collection - * + *

      * note: used for testing */ public static Map registeredCompressors() { diff --git a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java index 019e282444d64..9b806618fe0a0 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java @@ -18,7 +18,7 @@ /** * Service Provider Interface for plugins, modules, extensions providing custom * compression algorithms - * + *

      * see {@link Compressor} for implementing methods * and {@link org.opensearch.core.compress.CompressorRegistry} for the registration of custom * Compressors diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index b5ee482bcd952..a927179114188 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -48,7 +48,7 @@ /** * A value class representing the basic required properties of an OpenSearch index. - * + *

      * (This class is immutable.) * * @opensearch.api @@ -116,7 +116,7 @@ public String getUUID() { /** * Returns either the name and unique identifier of the index * or only the name if the uuid is {@link Strings#UNKNOWN_UUID_VALUE}. - * + *

      * If we have a uuid we put it in the toString so it'll show up in logs * which is useful as more and more things use the uuid rather * than the name as the lookup key for the index. diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java index 984434190b486..c0abad7ed727f 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java +++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java @@ -143,7 +143,7 @@ public String toString() { /** * Parse the string representation of this shardId back to an object. - * + *

      * We lose index uuid information here, but since we use toString in * rest responses, this is the best we can do to reconstruct the object * on the client side. diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java index a0e2a54fce91c..79e531a542026 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java @@ -108,7 +108,7 @@ public abstract void declareNamedObject( * * Unlike the other version of this method, "ordered" mode (arrays of * objects) is not supported. - * + *

      * See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -163,7 +163,7 @@ public abstract void declareNamedObjects( * the order sent but tools that generate json are free to put object * members in an unordered Map, jumbling them. Thus, if you care about order * you can send the object in the second way. - * + *

      * See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -366,10 +366,10 @@ public void declareFieldArray( /** * Declares a set of fields that are required for parsing to succeed. Only one of the values * provided per String[] must be matched. - * + *

      * E.g. declareRequiredFieldSet("foo", "bar"); means at least one of "foo" or * "bar" fields must be present. If neither of those fields are present, an exception will be thrown. - * + *

      * Multiple required sets can be configured: * *

      
      @@ -379,7 +379,7 @@ public  void declareFieldArray(
            *
            * requires that one of "foo" or "bar" fields are present, and also that one of "bizz" or
            * "buzz" fields are present.
      -     *
      +     * <p>

      * In JSON, it means any of these combinations are acceptable: * *

        @@ -415,12 +415,12 @@ public void declareFieldArray( /** * Declares a set of fields of which at most one must appear for parsing to succeed - * + *

        * E.g. declareExclusiveFieldSet("foo", "bar"); means that only one of 'foo' * or 'bar' must be present, and if both appear then an exception will be thrown. Note * that this does not make 'foo' or 'bar' required - see {@link #declareRequiredFieldSet(String...)} * for required fields. - * + *

        * Multiple exclusive sets may be declared * * @param exclusiveSet a set of field names, at most one of which must appear diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java index 254c340f8836f..0a5cda324ddb7 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java @@ -277,7 +277,7 @@ public Token currentToken() { /** * field name that the child element needs to inherit. - * + *
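A rough sketch of how the two declarations documented above combine; the surrounding method and field names are made up, and only declareRequiredFieldSet and declareExclusiveFieldSet come from the javadoc:

```java
import org.opensearch.core.xcontent.AbstractObjectParser;

public class FieldSetExample {
    static void declareConstraints(AbstractObjectParser<?, ?> parser) {
        parser.declareRequiredFieldSet("foo", "bar");    // at least one of "foo"/"bar" must be present
        parser.declareRequiredFieldSet("bizz", "buzz");  // ...and at least one of "bizz"/"buzz"
        parser.declareExclusiveFieldSet("foo", "bar");   // but "foo" and "bar" may not both appear
    }
}
```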

        * In most cases this is the same as currentName() except with embedded arrays. In "foo": [[42]] the first START_ARRAY * token will have the name "foo", but the second START_ARRAY will have no name. */ diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java index a38bdd049ee88..976f353100c55 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java @@ -726,7 +726,7 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce /** * Writes the binary content of the given byte array as UTF-8 bytes. - * + *

        * Use {@link XContentParser#charBuffer()} to read the value back */ public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java index 0535da1a584be..9b13ebb23be86 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java @@ -37,7 +37,7 @@ /** * This interface provides a way for non-JDK classes to plug in a way to serialize to xcontent. - * + *

        * It is greatly preferred that you implement {@link ToXContentFragment} * in the class for encoding, however, in some situations you may not own the * class, in which case you can add an implementation here for encoding it. @@ -63,7 +63,7 @@ public interface XContentBuilderExtension { * Used for plugging in a human readable version of a class's encoding. It is assumed that * the human readable equivalent is always behind the {@code toString()} method, so * this transformer returns the raw value to be used. - * + *

        * An example implementation: * *

        @@ -79,7 +79,7 @@ public interface XContentBuilderExtension {
             /**
              * Used for plugging a transformer for a date or time type object into a String (or other
              * encodable object).
        -     *
        +     * 

        * For example: * *

        diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
        index a2f16209a5b7f..4bfd47ccfdc94 100644
        --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
        +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
        @@ -44,7 +44,7 @@
         
         /**
          * Interface for pull - parsing {@link XContent} see {@code XContentType} for supported types.
        - *
        + * <p>

        * To obtain an instance of this class use the following pattern: * *

        @@ -202,11 +202,11 @@  Map map(Supplier> mapFactory, CheckedFunction
              * Default implementation simply returns false since only actual
              * implementation class has knowledge of its internal buffering
              * state.
        -     *
        +     * <p>

        * This method shouldn't be used to check if the token contains text or not. */ boolean hasTextCharacters(); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java index 13e2f6a695d1b..b10be393f9adb 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java @@ -142,10 +142,10 @@ public static Object parseFieldsValue(XContentParser parser) throws IOException * This method expects that the current field name is the concatenation of a type, a delimiter and a name * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, * "#" is the delimiter and "foo" the name of the object to parse). - * + *

        * It also expected that following this field name is either an Object or an array xContent structure and * the cursor points to the start token of this structure. - * + *

        * The method splits the field's name to extract the type and name and then parses the object * using the {@link XContentParser#namedObject(Class, String, Object)} method. * diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java index d1cdda4aeb8be..337cf9f95fe5f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java @@ -43,7 +43,7 @@ /** * Wrapper for a XContentParser that makes a single object/array look like a complete document. - * + *

        * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. The wrapper is intended to be * used for parsing objects that should be ignored if they are malformed. diff --git a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java index 9861847c9e1ea..828d4b7de450e 100644 --- a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java @@ -194,26 +194,25 @@ public DissectParser(String pattern, String appendSeparator) { * @throws DissectException if unable to dissect a pair into it's parts. */ public Map parse(String inputString) { - /** - * - * This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against - * another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes - * of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match - * all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for - * (the delimiter) is generally small and rare the naive approach is efficient. - * - * In this case the string that is walked is the input string, and the string being searched for is the current delimiter. - * For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the - * input string. At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered - * list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. - * - * There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should - * results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of - * {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. - * However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key - * without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and - * b=bar. - * + /* + + This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against + another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes + of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match + all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for + (the delimiter) is generally small and rare the naive approach is efficient. + + In this case the string that is walked is the input string, and the string being searched for is the current delimiter. + For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the + input string. 
At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered + list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. + + There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should + results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of + {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. + However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key + without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and + b=bar. */ DissectMatch dissectMatch = new DissectMatch(appendSeparator, maxMatches, maxResults, appendCount, referenceCount); Iterator it = matchPairs.iterator(); @@ -232,7 +231,10 @@ public Map parse(String inputString) { int lookAheadMatches; // start walking the input string byte by byte, look ahead for matches where needed // if a match is found jump forward to the end of the match - for (; i < input.length; i++) { + while (i < input.length) { + // start is only used to record the value of i + int start = i; + lookAheadMatches = 0; // potential match between delimiter and input string if (delimiter.length > 0 && input[i] == delimiter[0]) { @@ -284,8 +286,14 @@ public Map parse(String inputString) { delimiter = dissectPair.getDelimiter().getBytes(StandardCharsets.UTF_8); // i is always one byte after the last found delimiter, aka the start of the next value valueStart = i; + } else { + i++; } + } else { + i++; } + // i should change anyway + assert (i != start); } // the last key, grab the rest of the input (unless consecutive delimiters already grabbed the last key) // and there is no trailing delimiter diff --git a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java index cf1dce8966e1f..c05f316b53b9c 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java @@ -180,6 +180,7 @@ public int hashCode() { /** * Visit this circle with a {@link GeometryVisitor}. + * * @param visitor The visitor * @param The return type of the visitor * @param The exception type of the visitor diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java index 664e7e68d96a5..c946cc2473202 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java @@ -48,8 +48,8 @@ public class BitUtil { // magic numbers for bit interleaving /** * Interleaves the first 32 bits of each long value - * - * Adapted from: http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN + *
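To make the pattern semantics walked through in that comment concrete, here is a usage sketch built only from the constructor and parse signature visible in this hunk; the printed contents are shown informally, and an empty append separator is passed since no append (+) modifiers are used:

```java
import org.opensearch.dissect.DissectParser;

import java.util.Map;

public class DissectExample {
    public static void main(String[] args) {
        // "," and ":" are the delimiters searched for while walking the input left to right
        Map<String, String> parts = new DissectParser("%{a},%{b}:%{c}", "").parse("foo,bar:baz");
        System.out.println(parts);   // a=foo, b=bar, c=baz

        // the -> modifier skips runs of consecutive delimiters instead of producing empty values
        Map<String, String> skipped = new DissectParser("%{a->},%{b}", "").parse("foo,,,,,,bar");
        System.out.println(skipped); // a=foo, b=bar
    }
}
```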

        + * Adapted from: bithacks.html#InterleaveBMN */ public static long interleave(int even, int odd) { long v1 = 0x00000000FFFFFFFFL & even; diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java index 8b3b841e221e5..33c423e136613 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java @@ -39,12 +39,12 @@ /** * Utilities for converting to/from the GeoHash standard - * + *

        * The geohash long format is represented as lon/lat (x/y) interleaved with the 4 least significant bits * representing the level (1-12) [xyxy...xyxyllll] - * + *

        * This differs from a morton encoded value which interleaves lat/lon (y/x). - * + *

        * NOTE: this will replace {@code org.opensearch.common.geo.GeoHashUtils} */ public class Geohash { diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java index cd786b74be039..7aa3347ba4f4b 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java @@ -151,7 +151,7 @@ private void validatePatternBank() { /** * Checks whether patterns reference each other in a circular manner and, if so, fail with an exception. * Also checks for malformed pattern definitions and fails with an exception. - * + *
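A rough sketch of the [xyxy...xyxyllll] layout described above: lon/lat cell bits interleaved, with the precision level stored in the four least significant bits. The pack/level helpers here are hypothetical illustrations, not the Geohash class API, and the interleave loop is an unoptimized equivalent of the BitUtil bit-twiddling version.

```java
class GeohashLongSketch {
    // Pack quantized lon (x) and lat (y) cell indices plus a level (1-12) into one long:
    // interleaved x/y bits in the upper bits, the level in the 4 least significant bits.
    static long pack(int lonBits, int latBits, int level) {
        return (interleave(lonBits, latBits) << 4) | (level & 0xFL);
    }

    static int level(long geohashLong) {
        return (int) (geohashLong & 0xFL); // level 1-12 lives in the low 4 bits
    }

    // Straightforward (unoptimized) interleave: lon bits go to even positions, lat bits to odd.
    static long interleave(int even, int odd) {
        long result = 0L;
        for (int i = 0; i < 30; i++) { // 30 bits each keeps the packed value within 64 bits
            result |= ((long) ((even >>> i) & 1)) << (2 * i);
            result |= ((long) ((odd >>> i) & 1)) << (2 * i + 1);
        }
        return result;
    }
}
```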

        * In a pattern, anything between %{ and } or : is considered * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. diff --git a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java index 5c7eaca2a634a..d5b7566ecc90f 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java +++ b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java @@ -44,7 +44,7 @@ * Protects against long running operations that happen between the register and unregister invocations. * Threads that invoke {@link #register(Matcher)}, but take too long to invoke the {@link #unregister(Matcher)} method * will be interrupted. - * + *
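The circular-reference check described above can be pictured as a depth-first walk over the pattern bank, following every %{NAME} or %{NAME:field} reference and failing when a name reappears on the current path. This is a simplified hedged sketch; the names and structure are illustrative, not the Grok internals.

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class PatternBankCycleCheckSketch {
    // Captures the referenced pattern name from %{NAME} or %{NAME:field}.
    private static final Pattern REFERENCE = Pattern.compile("%\\{(\\w+)[^}]*}");

    static void ensureNoCycles(Map<String, String> patternBank) {
        for (String name : patternBank.keySet()) {
            walk(name, patternBank, new HashSet<>());
        }
    }

    private static void walk(String name, Map<String, String> bank, Set<String> path) {
        if (path.add(name) == false) {
            throw new IllegalArgumentException("circular reference detected for pattern [" + name + "]");
        }
        String definition = bank.get(name);
        if (definition == null) {
            throw new IllegalArgumentException("malformed pattern: [" + name + "] is not defined");
        }
        Matcher m = REFERENCE.matcher(definition);
        while (m.find()) {
            walk(m.group(1), bank, path); // follow the reference
        }
        path.remove(name); // backtrack so shared (non-circular) references are allowed
    }
}
```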

        * This is needed for Joni's {@link org.joni.Matcher#search(int, int, int)} method, because * it can end up spinning endlessly if the regular expression is too complex. Joni has checks * that for every 30k iterations it checks if the current thread is interrupted and if so diff --git a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java index 797dfe859fa6c..0e29661978716 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java @@ -116,10 +116,10 @@ protected void handleException(Exception e) { /** * Schedules a channel to be closed by the selector event loop with which it is registered. - * + *

        * If the channel is open and the state can be transitioned to closed, the close operation will * be scheduled with the event loop. - * + *

        * Depending on the underlying protocol of the channel, a close operation might simply close the socket * channel or may involve reading and writing messages. */ diff --git a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java index a38a33182afea..4ed745723515c 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java +++ b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java @@ -512,12 +512,12 @@ private void handleQueuedWrites() { * This is a convenience method to be called after some object (normally channels) are enqueued with this * selector. This method will check if the selector is still open. If it is open, normal operation can * proceed. - * + *

        * If the selector is closed, then we attempt to remove the object from the queue. If the removal * succeeds then we throw an {@link IllegalStateException} indicating that normal operation failed. If * the object cannot be removed from the queue, then the object has already been handled by the selector * and operation can proceed normally. - * + *

        * If this method is called from the selector thread, we will not allow the queuing to occur as the * selector thread can manipulate its queues internally even if it is no longer open. * diff --git a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java index 12a1e80055823..3df8e42fe4f14 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java @@ -59,7 +59,7 @@ * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will * be called. This context will need to implement all protocol related logic. Additionally, if any special * close behavior is required, it should be implemented in this context. - * + *

        * The only methods of the context that should ever be called from a non-selector thread are * {@link #closeChannel()} and {@link #sendMessage(Object, BiConsumer)}. */ diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java new file mode 100644 index 0000000000000..c62288d280e2f --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Counter adds the value to the existing metric. + * {@opensearch.experimental} + */ +@ExperimentalApi +public interface Counter { + + /** + * add value. + * @param value value to be added. + */ + void add(double value); + + /** + * add value along with the attributes. + * + * @param value value to be added. + * @param tags attributes/dimensions of the metric. + */ + void add(double value, Tags tags); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java new file mode 100644 index 0000000000000..d57def9406b17 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.io.IOException; + +/** + * Default implementation for {@link MetricsRegistry} + */ +class DefaultMetricsRegistry implements MetricsRegistry { + private final MetricsTelemetry metricsTelemetry; + + /** + * Constructor + * @param metricsTelemetry metrics telemetry. + */ + public DefaultMetricsRegistry(MetricsTelemetry metricsTelemetry) { + this.metricsTelemetry = metricsTelemetry; + } + + @Override + public Counter createCounter(String name, String description, String unit) { + return metricsTelemetry.createCounter(name, description, unit); + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return metricsTelemetry.createUpDownCounter(name, description, unit); + } + + @Override + public void close() throws IOException { + metricsTelemetry.close(); + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java new file mode 100644 index 0000000000000..61b3df089928b --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.Closeable; + +/** + * MetricsRegistry helps in creating the metric instruments. 
+ * @opensearch.experimental + */ +@ExperimentalApi +public interface MetricsRegistry extends Closeable { + + /** + * Creates the counter. + * @param name name of the counter. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return counter. + */ + Counter createCounter(String name, String description, String unit); + + /** + * Creates the upDown counter. + * @param name name of the upDown counter. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return counter. + */ + Counter createUpDownCounter(String name, String description, String unit); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java index 0bf9482fe58d8..fb3dec8152b4f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java @@ -16,6 +16,6 @@ * @opensearch.experimental */ @ExperimentalApi -public interface MetricsTelemetry { +public interface MetricsTelemetry extends MetricsRegistry { } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java new file mode 100644 index 0000000000000..c1daf564dd3bc --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * No-op {@link Counter} + * {@opensearch.internal} + */ +@InternalApi +public class NoopCounter implements Counter { + + /** + * No-op Counter instance + */ + public final static NoopCounter INSTANCE = new NoopCounter(); + + private NoopCounter() {} + + @Override + public void add(double value) { + + } + + @Override + public void add(double value, Tags tags) { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java new file mode 100644 index 0000000000000..640c6842a8960 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +import java.io.IOException; + +/** + *No-op {@link MetricsRegistry} + * {@opensearch.internal} + */ +@InternalApi +public class NoopMetricsRegistry implements MetricsRegistry { + + /** + * No-op Meter instance + */ + public final static NoopMetricsRegistry INSTANCE = new NoopMetricsRegistry(); + + private NoopMetricsRegistry() {} + + @Override + public Counter createCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public void close() throws IOException { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java new file mode 100644 index 0000000000000..7c7ed08044993 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains metrics related classes + * {@opensearch.internal} + */ +@InternalApi +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java new file mode 100644 index 0000000000000..f2a8764f8021d --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.tags; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Class to create tags for a meter. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Tags { + private final Map tagsMap; + /** + * Empty value. + */ + public final static Tags EMPTY = new Tags(Collections.emptyMap()); + + /** + * Factory method. + * @return tags. + */ + public static Tags create() { + return new Tags(new HashMap<>()); + } + + /** + * Constructor. + */ + private Tags(Map tagsMap) { + this.tagsMap = tagsMap; + } + + /** + * Add String attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, String value) { + Objects.requireNonNull(value, "value cannot be null"); + tagsMap.put(key, value); + return this; + } + + /** + * Add long attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, long value) { + tagsMap.put(key, value); + return this; + }; + + /** + * Add double attribute. + * @param key key + * @param value value + * @return Same instance. 
+ */ + public Tags addTag(String key, double value) { + tagsMap.put(key, value); + return this; + }; + + /** + * Add boolean attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, boolean value) { + tagsMap.put(key, value); + return this; + }; + + /** + * Returns the attribute map. + * @return tags map + */ + public Map getTagsMap() { + return Collections.unmodifiableMap(tagsMap); + } + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java new file mode 100644 index 0000000000000..70bc9be992b32 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains metrics related classes + * @opensearch.experimental + */ +@ExperimentalApi +package org.opensearch.telemetry.metrics.tags; + +import org.opensearch.common.annotation.ExperimentalApi; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index d3c28b3a9cb5e..a3bb64ea392a9 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -25,7 +25,10 @@ */ @InternalApi class DefaultTracer implements Tracer { - static final String THREAD_NAME = "th_name"; + /** + * Current thread name. + */ + static final String THREAD_NAME = "thread.name"; private final TracingTelemetry tracingTelemetry; private final TracerContextStorage tracerContextStorage; @@ -82,6 +85,11 @@ public SpanScope withSpanInScope(Span span) { return DefaultSpanScope.create(span, tracerContextStorage).attach(); } + @Override + public boolean isRecording() { + return true; + } + private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { return tracingTelemetry.createSpan(spanCreationContext, parentSpan); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index 49295f417df9e..8257d251e9560 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -16,7 +16,7 @@ /** * Tracer is the interface used to create a {@link Span} * It automatically handles the context propagation between threads, tasks, nodes etc. - * + *
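The new metrics surface added above (MetricsRegistry, Counter, Tags) composes roughly as follows; the registry instance, metric names, and tag values in this sketch are hypothetical placeholders.

```java
import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;

class MetricsUsageSketch {
    void onRequest(MetricsRegistry metricsRegistry) {
        // Create the instrument once and reuse it; name, description and unit are illustrative.
        Counter requestCounter = metricsRegistry.createCounter("http.requests.count", "Number of HTTP requests", "1");

        // Record a value with dimensions attached through Tags.
        requestCounter.add(1.0, Tags.create().addTag("status", "200").addTag("internal", false));

        // An up-down counter can also be decremented, e.g. to track in-flight work.
        Counter inFlight = metricsRegistry.createUpDownCounter("http.requests.inflight", "In-flight HTTP requests", "1");
        inFlight.add(1.0);
        inFlight.add(-1.0);
    }
}
```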

        * All methods on the Tracer object are multi-thread safe. * * @opensearch.experimental @@ -53,4 +53,10 @@ public interface Tracer extends HttpTracer, Closeable { */ SpanScope withSpanInScope(Span span); + /** + * Tells if the traces are being recorded or not + * @return boolean + */ + boolean isRecording(); + } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java index 38c9104c3ec35..f04a505088424 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java @@ -35,9 +35,4 @@ public interface TracingTelemetry extends Closeable { */ TracingContextPropagator getContextPropagator(); - /** - * closes the resource - */ - void close(); - } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java index b0692f1b62a48..50d18c0a0d040 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java @@ -18,7 +18,7 @@ /** * HttpTracer helps in creating a {@link Span} which reads the incoming tracing information * from the HttpRequest header and propagate the span accordingly. - * + *
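The isRecording() method introduced above (true in DefaultTracer, false in the no-op tracer further below) is the kind of flag callers can use to skip building expensive span attributes when nothing is being recorded. A hedged sketch, assuming the Span interface exposes an addAttribute method:

```java
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.Tracer;

class TracingGuardSketch {
    void annotate(Tracer tracer, Span span, String query) {
        if (tracer.isRecording()) {
            // Only pay for attribute construction when spans are actually recorded.
            span.addAttribute("query", query); // assumed Span API; illustrative attribute key
        }
    }
}
```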

        * All methods on the Tracer object are multi-thread safe. * * @opensearch.experimental diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index c073e8d3e766f..50452ff5fe3b4 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -54,6 +54,11 @@ public SpanScope withSpanInScope(Span span) { return SpanScope.NO_OP; } + @Override + public boolean isRecording() { + return false; + } + @Override public void close() { diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java new file mode 100644 index 0000000000000..6171641db5f07 --- /dev/null +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DefaultMetricsRegistryTests extends OpenSearchTestCase { + + private MetricsTelemetry metricsTelemetry; + private DefaultMetricsRegistry defaultMeterRegistry; + + @Override + public void setUp() throws Exception { + super.setUp(); + metricsTelemetry = mock(MetricsTelemetry.class); + defaultMeterRegistry = new DefaultMetricsRegistry(metricsTelemetry); + } + + public void testCounter() { + Counter mockCounter = mock(Counter.class); + when(defaultMeterRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenReturn(mockCounter); + Counter counter = defaultMeterRegistry.createCounter( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testCounter", + "test counter", + "1" + ); + assertSame(mockCounter, counter); + } + + public void testUpDownCounter() { + Counter mockCounter = mock(Counter.class); + when(defaultMeterRegistry.createUpDownCounter(any(String.class), any(String.class), any(String.class))).thenReturn(mockCounter); + Counter counter = defaultMeterRegistry.createUpDownCounter( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testUpDownCounter", + "test up-down counter", + "1" + ); + assertSame(mockCounter, counter); + } + +} diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 0a717e993cb81..2a791f1ae4164 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -62,6 +62,7 @@ public void testCreateSpan() { String spanName = defaultTracer.getCurrentSpan().getSpan().getSpanName(); assertEquals("span_name", spanName); + assertTrue(defaultTracer.isRecording()); } @SuppressWarnings("unchecked") diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java 
b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 3b857a2c690b9..d3d9ea174cf1b 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -628,7 +628,7 @@ public void testCreateRootSubParser() throws IOException { /** * Generates a random object {"first_field": "foo", "marked_field": {...random...}, "last_field": "bar} - * + *

        * Returns the number of tokens in the marked field */ private static int generateRandomObjectForMarking(XContentBuilder builder) throws IOException { diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java index de67cc2930652..de6b59b1546a5 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java @@ -46,7 +46,7 @@ /** * Descriptive stats gathered per shard. Coordinating node computes final correlation and covariance stats * based on these descriptive stats. This single pass, parallel approach is based on: - * + *

        * http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf */ public class RunningStats implements Writeable, Cloneable { @@ -222,7 +222,7 @@ private void updateCovariance(final String[] fieldNames, final Map * running computations taken from: http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf **/ public void merge(final RunningStats other) { diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index b0d9c1765190a..edb8c37c2dbdd 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -555,7 +555,7 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> { TokenStream ts = new ShingleFilter(input); - /** + /* * We disable the graph analysis on this token stream * because it produces shingles of different size. * Graph analysis on such token stream is useless and dangerous as it may create too many paths diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java index ad968aeee62cb..e9f3fd96dd69d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java @@ -49,12 +49,12 @@ * A factory for creating keyword marker token filters that prevent tokens from * being modified by stemmers. Two types of keyword marker filters are available: * the {@link SetKeywordMarkerFilter} and the {@link PatternKeywordMarkerFilter}. - * + *
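The single-pass, mergeable approach referenced in the RunningStats javadoc above comes down to combining partial counts, means, and co-moments without revisiting the raw data. Below is a hedged sketch of the idea for a single variable (mean and variance only); the real class also tracks cross-field covariance, and this is not its code.

```java
class RunningMomentsSketch {
    double count;
    double mean;
    double m2; // sum of squared deviations from the running mean

    void add(double value) {
        count++;
        double delta = value - mean;
        mean += delta / count;
        m2 += delta * (value - mean);
    }

    // Merge another shard's partial stats into this one (standard parallel update formulas).
    void merge(RunningMomentsSketch other) {
        if (other.count == 0) {
            return;
        }
        double combined = count + other.count;
        double delta = other.mean - mean;
        m2 = m2 + other.m2 + delta * delta * count * other.count / combined;
        mean = (count * mean + other.count * other.mean) / combined;
        count = combined;
    }

    double variance() {
        return count > 1 ? m2 / (count - 1) : 0.0;
    }
}
```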

        * The {@link SetKeywordMarkerFilter} uses a set of keywords to denote which tokens * should be excluded from stemming. This filter is created if the settings include * {@code keywords}, which contains the list of keywords, or {@code `keywords_path`}, * which contains a path to a file in the config directory with the keywords. - * + *

        * The {@link PatternKeywordMarkerFilter} uses a regular expression pattern to match * against tokens that should be excluded from stemming. This filter is created if * the settings include {@code keywords_pattern}, which contains the regular expression diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java index 78d151ee16c3b..04786689b50f0 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java @@ -45,7 +45,7 @@ /** Filters {@link StandardTokenizer} with {@link * LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}. - * + *
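For orientation, the configuration styles described above (keywords, keywords_path, keywords_pattern) are ordinary index analysis settings. This hedged sketch builds two of them with the generic Settings builder, using made-up filter names and values.

```java
import org.opensearch.common.settings.Settings;

class KeywordMarkerSettingsSketch {
    // Explicit keyword list -> SetKeywordMarkerFilter protects these tokens from stemming.
    Settings exactKeywords() {
        return Settings.builder()
            .put("index.analysis.filter.my_keywords.type", "keyword_marker")
            .putList("index.analysis.filter.my_keywords.keywords", "opensearch", "lucene")
            .build();
    }

    // Regular expression -> PatternKeywordMarkerFilter protects any token matching the pattern.
    Settings patternKeywords() {
        return Settings.builder()
            .put("index.analysis.filter.my_regex_keywords.type", "keyword_marker")
            .put("index.analysis.filter.my_regex_keywords.keywords_pattern", ".*search.*")
            .build();
    }
}
```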

        * Available stemmers are listed in org.tartarus.snowball.ext. The name of a * stemmer is the part of the class name before "Stemmer", e.g., the stemmer in * {@link org.tartarus.snowball.ext.EnglishStemmer} is named "English". diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java index 9149b8939b739..665ea6c5f2f37 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -48,7 +48,7 @@ /** * A {@link SingleDimensionValuesSource} for geotile values. - * + *

        * Since geotile values can be represented as long values, this class is almost the same as {@link LongValuesSource} * The main differences is {@link GeoTileValuesSource#setAfter(Comparable)} as it needs to accept geotile string values i.e. "zoom/x/y". * diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java index 588c8bc59c2e0..6ff38fa28978e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java @@ -37,7 +37,7 @@ /** * Class representing {@link CellValues} whose values are filtered * according to whether they are within the specified {@link GeoBoundingBox}. - * + *

        * The specified bounding box is assumed to be bounded. * * @opensearch.internal diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java index 37320c0e900a5..7e114023fb86f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java @@ -37,7 +37,7 @@ /** * Exception class thrown by {@link FailProcessor}. - * + *

        * This exception is caught in the {@link CompoundProcessor} and * then changes the state of {@link IngestDocument}. This * exception should get serialized. diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java index 741a4fb29cfb8..b7c417f5f44a5 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java @@ -53,10 +53,10 @@ /** * A processor that for each value in a list executes a one or more processors. - * + *

        * This can be useful in cases to do string operations on json array of strings, * or remove a field from objects inside a json array. - * + *

        * Note that this processor is experimental. */ public final class ForEachProcessor extends AbstractProcessor implements WrappingProcessor { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java index ca0c0df40f009..e42a1147825d1 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java @@ -155,4 +155,28 @@ public void testNullValueWithOutIgnoreMissing() { IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); } + + public void testMatchEmptyBrackets() { + IngestDocument ingestDocument = new IngestDocument( + "_index", + "_id", + null, + null, + null, + Collections.singletonMap("message", "[foo],[bar],[]") + ); + DissectProcessor dissectProcessor = new DissectProcessor("", null, "message", "[%{a}],[%{b}],[%{c}]", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("foo", ingestDocument.getFieldValue("a", String.class)); + assertEquals("bar", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + + ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("message", "{}{}{}{baz}")); + dissectProcessor = new DissectProcessor("", null, "message", "{%{a}}{%{b}}{%{c}}{%{d}}", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("", ingestDocument.getFieldValue("a", String.class)); + assertEquals("", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + assertEquals("baz", ingestDocument.getFieldValue("d", String.class)); + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml index 916a7fe656cc2..d90e5fbf2362b 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -84,3 +84,38 @@ teardown: } ] } + +--- +"Test dissect processor can match empty brackets": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "[%{a}][%{b}][%{c}]" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {message: "[foo][bar][]"} + + - do: + get: + index: test + id: 1 + - match: { _source.message: "[foo][bar][]" } + - match: { _source.a: "foo" } + - match: { _source.b: "bar" } + - match: { _source.c: "" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml index e012a82b15927..7c073739f6a1f 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -976,3 +976,140 @@ teardown: } - match: { error.root_cause.0.type: 
"illegal_argument_exception" } - match: { error.root_cause.0.reason: "Pipeline processor configured for non-existent pipeline [____pipeline_doesnot_exist___]" } + +--- +"Test simulate with docs containing metadata fields": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field": "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_version": 100, + "_if_seq_no": 12333333333333333, + "_if_primary_term": 1, + "_source": { + "foo": "bar" + } + } + ] + } + + - length: { docs: 1 } + - match: { docs.0.doc._index: "index" } + - match: { docs.0.doc._id: "id" } + - match: { docs.0.doc._routing: "foo" } + - match: { docs.0.doc._version: "100" } + - match: { docs.0.doc._if_seq_no: "12333333333333333" } + - match: { docs.0.doc._if_primary_term: "1" } + - match: { docs.0.doc._source.foo: "bar" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_version": "bar", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_version], only int or long is accepted" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_if_seq_no": "123", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_seq_no], only int or long is accepted" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_if_primary_term": "1", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_primary_term], only int or long is accepted" } diff --git a/modules/lang-expression/licenses/asm-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-expression/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c1daa91dd5433..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57e2b0cca55da8ad856dfd60be42e6daabbc98c3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..892865a017f48 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7725476acfcb9bdfeff1b813ce15c39c6b857dc2 \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index 8ca28a905f216..bb2f652168d5c 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -504,6 +504,10 @@ public void testInvalidFieldMember() { } public void testSpecialValueVariable() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10079", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); // i.e. _value for aggregations createIndex("test"); ensureGreen("test"); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 035d2402857e0..11b2e20eea523 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -73,7 +73,7 @@ /** * Provides the infrastructure for Lucene expressions as a scripting language for OpenSearch. - * + *

        * Only contexts returning numeric types or {@link Object} are supported. */ public class ExpressionScriptEngine implements ScriptEngine { diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java index f4d7198dc2124..ec84475b70bb6 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java @@ -59,7 +59,7 @@ /** * Main entry point handling template registration, compilation and * execution. - * + *

        * Template handling is based on Mustache. Template handling is a two step * process: First compile the string representing the template, the resulting * {@link Mustache} object can then be re-used for subsequent executions. diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java index 9e97863306148..fbb7d09709a91 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java @@ -200,7 +200,7 @@ private String getChars() { /** * From https://www.ietf.org/rfc/rfc4627.txt: - * + *
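The two-step flow described above (compile once, execute many times) maps onto the underlying mustache.java API roughly like this hedged sketch; the template body and parameters are made up, and this is not the MustacheScriptEngine code.

```java
import com.github.mustachejava.DefaultMustacheFactory;
import com.github.mustachejava.Mustache;
import com.github.mustachejava.MustacheFactory;

import java.io.StringReader;
import java.io.StringWriter;
import java.util.Map;

class MustacheTwoStepSketch {
    public static void main(String[] args) {
        MustacheFactory factory = new DefaultMustacheFactory();

        // Step 1: compile the template string once.
        Mustache template = factory.compile(new StringReader("{\"match\": {\"title\": \"{{query_string}}\"}}"), "search-template");

        // Step 2: reuse the compiled template for each execution with different parameters.
        StringWriter output = new StringWriter();
        template.execute(output, Map.of("query_string", "open source search"));
        System.out.println(output);
    }
}
```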

        * All Unicode characters may be placed within the * quotation marks except for the characters that must be escaped: * quotation mark, reverse solidus, and the control characters (U+0000 diff --git a/modules/lang-painless/licenses/asm-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-painless/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 deleted file mode 100644 index 9e87d3ce7d719..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -490bacc77de7cbc0be1a30bb3471072d705be4a4 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 new file mode 100644 index 0000000000000..fa42ea1198165 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 @@ -0,0 +1 @@ +9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 deleted file mode 100644 index 5fffbfe655deb..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64b5a1fc8c1b15ed2efd6a063e976bc8d3dc5ffe \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 new file mode 100644 index 0000000000000..1f42ac62dc69c --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 @@ -0,0 +1 @@ +f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at 
end of file diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java index 56ade63efa5e2..265263b41ca89 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java @@ -42,7 +42,7 @@ * Allowlist contains data structures designed to be used to generate an allowlist of Java classes, * constructors, methods, and fields that can be used within a Painless script at both compile-time * and run-time. - * + *

        * A Allowlist consists of several pieces with {@link AllowlistClass}s as the top level. Each * {@link AllowlistClass} will contain zero-to-many {@link AllowlistConstructor}s, {@link AllowlistMethod}s, and * {@link AllowlistField}s which are what will be available with a Painless script. See each individual diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java index 17e6814addf3b..67f5a07846c53 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java @@ -45,12 +45,12 @@ * classes. Though, since multiple allowlists may be combined into a single allowlist for a * specific context, as long as multiple classes representing the same Java class have the same * class name and have legal constructor/method overloading they can be merged together. - * + *

        * Classes in Painless allow for arity overloading for constructors and methods. Arity overloading * means that multiple constructors are allowed for a single class as long as they have a different * number of parameters, and multiples methods with the same name are allowed for a single class * as long as they have the same return type and a different number of parameters. - * + *

        * Classes will automatically extend other allowlisted classes if the Java class they represent is a * subclass of other classes including Java interfaces. */ diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java index 71265f82acacc..632fee9187eba 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java @@ -67,11 +67,11 @@ public static Allowlist loadFromResourceFiles(Class resource, String... filep * is the path of a single text file. The {@link Class}'s {@link ClassLoader} will be used to lookup the Java * reflection objects for each individual {@link Class}, {@link Constructor}, {@link Method}, and {@link Field} * specified as part of the allowlist in the text file. - * + *

        * A single pass is made through each file to collect all the information about each class, constructor, method, * and field. Most validation will be done at a later point after all allowlists have been gathered and their * merging takes place. - * + *

        * A painless type name is one of the following: *

          *
        • def - The Painless dynamic type which is automatically included without a need to be @@ -129,13 +129,13 @@ public static Allowlist loadFromResourceFiles(Class resource, String... filep * be appropriately parsed and handled. Painless complex types must be specified with the * fully-qualified Java class name. Method argument types, method return types, and field types * must be specified with Painless type names (def, fully-qualified, or short) as described earlier. - * + *

          * The following example is used to create a single allowlist text file: * - * {@code + * <pre>

                * # primitive types
                *
          -     * class int -> int {
          +     * class int -&gt; int {
                * }
                *
                * # complex types
          @@ -161,7 +161,7 @@ public static Allowlist loadFromResourceFiles(Class resource, String... filep
                *   int value1
                *   def value2
                * }
          -     * }
          +     * </pre>
          */ public static Allowlist loadFromResourceFiles(Class resource, Map parsers, String... filepaths) { List allowlistClasses = new ArrayList<>(); diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java index 8bb0231ff3f4f..9fcaec3dbf7b6 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java @@ -45,7 +45,7 @@ * are using the '.' operator on an existing class variable/field. Painless classes may have multiple * methods with the same name as long as they comply with arity overloading described in * {@link AllowlistClass}. - * + *
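A plugin typically loads such a file through the loader shown above; a brief hedged sketch follows (the holder class and resource file name are hypothetical, and the resource is resolved relative to the given class on the classpath).

```java
import org.opensearch.painless.spi.Allowlist;
import org.opensearch.painless.spi.AllowlistLoader;

class MyAllowlistHolder {
    // The text file follows the format documented in the AllowlistLoader javadoc above.
    static final Allowlist MY_ALLOWLIST = AllowlistLoader.loadFromResourceFiles(MyAllowlistHolder.class, "my_script_allowlist.txt");
}
```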

          * Classes may also have additional methods that are not part of the Java class the class represents - * these are known as augmented methods. An augmented method can be added to a class as a part of any * Java class as long as the method is static and the first parameter of the method is the Java class diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index 35c676653fdc3..c19d4f361b2b6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -73,9 +73,7 @@ final class Compiler { */ private static final CodeSource CODESOURCE; - /** - * Setup the code privileges. - */ + /* Setup the code privileges. */ static { try { // Setup the code privileges. diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java index cae425ad1fe3b..3164f5e6388c7 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java @@ -42,13 +42,13 @@ /** * PainlessLookupUtility contains methods shared by {@link PainlessLookupBuilder}, {@link PainlessLookup}, and other classes within * Painless for conversion between type names and types along with some other various utility methods. - * + *

          * The following terminology is used for variable names throughout the lookup package: - * + *

          * A class is a set of methods and fields under a specific class name. A type is either a class or an array under a specific type name. * Note the distinction between class versus type is class means that no array classes will be be represented whereas type allows array * classes to be represented. The set of available classes will always be a subset of the available types. - * + *

          * Under ambiguous circumstances most variable names are prefixed with asm, java, or painless. If the variable value is the same for asm, * java, and painless, no prefix is used. Target is used as a prefix to represent if a constructor, method, or field is being * called/accessed on that specific class. Parameter is often a postfix used to represent if a type is used as a parameter to a diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java index 04165f44ba212..8a05d6742af97 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java @@ -126,9 +126,9 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { /** * Visits an expression that is also considered a statement. - * + *

          * If the statement is a return from the execute method, performs return value conversion. - * + *

          * Checks: control flow, type validation */ @Override @@ -168,9 +168,9 @@ public void visitExpression(SExpression userExpressionNode, SemanticScope semant /** * Visits a return statement and casts the value to the return type if possible. - * + *

          * If the statement is a return from the execute method, performs return value conversion. - * + *

          * Checks: type validation */ @Override diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java index e27530d745e8f..5ac802038afa6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java @@ -49,7 +49,7 @@ * Tracks information within a scope required for compilation during the * semantic phase in the user tree. There are three types of scopes - * {@link FunctionScope}, {@link LambdaScope}, and {@link BlockScope}. - * + *

          * Scopes are stacked as they are created during the user tree's semantic * phase with each scope beyond the top-level containing a reference to * its parent. As a scope is no longer necessary, it's dropped automatically diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 397a7b48b472a..366e848416328 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -82,7 +82,7 @@ /** * Mapper for a text field that optimizes itself for as-you-type completion by indexing its content into subfields. Each subfield * modifies the analysis chain of the root field to index terms the user would create as they type out the value in the root field - * + *

          * The structure of these fields is * *

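As an aside on the SearchAsYouTypeFieldMapper hunk above: the subfields it creates are driven entirely by the field mapping. Below is a minimal, hedged sketch (the field name and the index layout are hypothetical, not taken from this change); with max_shingle_size set to 3 the mapper is expected to add the subfields title._2gram, title._3gram and title._index_prefix alongside the root title field.

    // Illustrative only: the JSON mapping body that exercises the search_as_you_type mapper.
    // Nothing here is part of the diff; "title" and max_shingle_size = 3 are assumed values.
    public final class SearchAsYouTypeMappingSketch {
        public static void main(String[] args) {
            String mapping = "{\n"
                + "  \"properties\": {\n"
                + "    \"title\": {\n"
                + "      \"type\": \"search_as_you_type\",\n"
                + "      \"max_shingle_size\": 3\n"
                + "    }\n"
                + "  }\n"
                + "}";
            System.out.println(mapping); // body for a PUT <index> mapping request
        }
    }
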
          diff --git a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
          index 1a51259e9e4e4..e930780613ed6 100644
          --- a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
          +++ b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
          @@ -373,7 +373,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
                * A query that rewrites into another query using
                * {@link JoinUtil#createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, OrdinalMap, int, int)}
                * that executes the actual join.
          -     *
          +     * 

          * This query is exclusively used by the {@link HasChildQueryBuilder} and {@link HasParentQueryBuilder} to get access * to the {@link DirectoryReader} used by the current search in order to retrieve the {@link OrdinalMap}. * The {@link OrdinalMap} is required by {@link JoinUtil} to execute the join. diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java index ffdea14e0873d..8e72c6ef06849 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java @@ -71,10 +71,10 @@ * supplied query parameters) against a set of possible search requests (read: * search specifications, expressed as query/search request templates) and * compares the result against a set of annotated documents per search intent. - * + *
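Returning briefly to the has_child/has_parent rewrite described in the parent-join hunk above, a hedged usage sketch (the relation name "comment" and the field "text" are hypothetical; the builder signature is the one this module exposes):

    // Illustrative only: a has_child query that scores parents by the best matching child.
    // ScoreMode controls how child scores fold into the parent score; ScoreMode.None skips scoring.
    import org.apache.lucene.search.join.ScoreMode;
    import org.opensearch.index.query.QueryBuilder;
    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.join.query.HasChildQueryBuilder;

    public final class HasChildQuerySketch {
        public static void main(String[] args) {
            QueryBuilder query = new HasChildQueryBuilder(
                "comment",                                      // hypothetical child relation name
                QueryBuilders.matchQuery("text", "opensearch"), // hypothetical child-field query
                ScoreMode.Max
            );
            System.out.println(query); // prints the query as JSON
        }
    }
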

* If any documents are returned that haven't been annotated, the document ids of * those are returned per search intent. - * + *

          * The resulting search quality is computed in terms of precision at n and * returned for each search specification for the full set of search intents as * averaged precision at n. diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java index fcf1fa489f740..d96e3212e05a2 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -67,7 +67,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { /** * Assuming the docs are ranked in the following order: - * + *

* rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 @@ -76,7 +76,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { * 4 | 0 | 0.0 | 2.321928094887362 | 0.0 * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | 2 | 3.0 | 2.807354922057604 | 1.0686215613240666 - * + *

          * dcg = 13.84826362927298 (sum of last column) */ public void testDCGAt() { @@ -91,20 +91,20 @@ public void testDCGAt() { DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); assertEquals(EXPECTED_DCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 - * 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 - * 6 | 0 | 0.0 | 2.807354922057604  | 0.0 - * - * idcg = 14.595390756454922 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 + 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 + 6 | 0 | 0.0 | 2.807354922057604  | 0.0 + + idcg = 14.595390756454922 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(EXPECTED_NDCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -113,7 +113,7 @@ public void testDCGAt() { /** * This tests metric when some documents in the search result don't have a * rating provided by the user. - * + *
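As a cross-check, the arithmetic in the table above can be reproduced outside the test. A minimal standalone sketch follows; the relevance values for ranks 2 and 3 are not visible in this excerpt and are assumed to be 2 and 3, which is what makes the sum come out to the expected value.

    // Illustrative only: DCG as used above, i.e. the sum over ranks of (2^relevance - 1) / log_2(rank + 1).
    // Relevance per rank 1..6; the values for ranks 2 and 3 are assumed (not shown in this excerpt).
    public final class DcgSketch {
        public static void main(String[] args) {
            int[] relevance = { 3, 2, 3, 0, 1, 2 };
            double dcg = 0d;
            for (int i = 0; i < relevance.length; i++) {
                int rank = i + 1;
                dcg += (Math.pow(2, relevance[i]) - 1) / (Math.log(rank + 1) / Math.log(2));
            }
            System.out.println(dcg); // prints approximately 13.84826362927298
        }
    }
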

* rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 @@ -122,7 +122,7 @@ public void testDCGAt() { * 4 | n/a | n/a | n/a | n/a * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + *

          * dcg = 12.779642067948913 (sum of last column) */ public void testDCGAtSixMissingRatings() { @@ -143,20 +143,20 @@ public void testDCGAtSixMissingRatings() { assertEquals(12.779642067948913, result.metricScore(), DELTA); assertEquals(2, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * ---------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + ---------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + 5 | n.a | n.a | n.a.  | n.a. + 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.779642067948913 / 13.347184833073591, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -166,7 +166,7 @@ public void testDCGAtSixMissingRatings() { * This tests that normalization works as expected when there are more rated * documents than search hits because we restrict DCG to be calculated at the * fourth position - * + *

* rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 @@ -176,7 +176,7 @@ public void testDCGAtSixMissingRatings() { * 4 | n/a | n/a | n/a | n/a * ----------------------------------------------------------------- * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + *

          * dcg = 12.392789260714371 (sum of last column until position 4) */ public void testDCGAtFourMoreRatings() { @@ -200,21 +200,21 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371, result.metricScore(), DELTA); assertEquals(1, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * --------------------------------------------------------------------------------------- - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + --------------------------------------------------------------------------------------- + 5 | n.a | n.a | n.a.  | n.a. + 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).metricScore(), DELTA); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index e2442e1f483f0..6ed486fbdb33b 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -206,7 +206,7 @@ public abstract class AbstractAsyncBulkByScrollAction< /** * Build the {@link BiFunction} to apply to all {@link RequestWrapper}. - * + *

          * Public for testings.... */ public BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java index d5a6e392f2019..7534de1408bcc 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -63,14 +63,14 @@ private BulkByScrollParallelizationHelper() {} /** * Takes an action created by a {@link BulkByScrollTask} and runs it with regard to whether the request is sliced or not. - * + *

          * If the request is not sliced (i.e. the number of slices is 1), the worker action in the given {@link Runnable} will be started on * the local node. If the request is sliced (i.e. the number of slices is more than 1), then a subrequest will be created for each * slice and sent. - * + *

          * If slices are set as {@code "auto"}, this class will resolve that to a specific number based on characteristics of the source * indices. A request with {@code "auto"} slices may end up being sliced or unsliced. - * + *

          * This method is equivalent to calling {@link #initTaskState} followed by {@link #executeSlicedAction} */ static > void startSlicedAction( @@ -98,11 +98,11 @@ public void onFailure(Exception e) { /** * Takes an action and a {@link BulkByScrollTask} and runs it with regard to whether this task is a * leader or worker. - * + *

          * If this task is a worker, the worker action in the given {@link Runnable} will be started on the local * node. If the task is a leader (i.e. the number of slices is more than 1), then a subrequest will be * created for each slice and sent. - * + *

          * This method can only be called after the task state is initialized {@link #initTaskState}. */ static > void executeSlicedAction( @@ -125,7 +125,7 @@ static > void executeSliced /** * Takes a {@link BulkByScrollTask} and ensures that its initial task state (leader or worker) is set. - * + *
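The leader/worker split described in these javadoc hunks is driven entirely by the slices setting on the request. A hedged client-side sketch (index names are hypothetical; AUTO_SLICES is the sentinel this helper resolves to a concrete slice count):

    // Illustrative only: two reindex requests, one letting the helper pick the slice count ("auto"),
    // one explicitly sliced and therefore handled by a leader task plus one worker per slice.
    import org.opensearch.index.reindex.AbstractBulkByScrollRequest;
    import org.opensearch.index.reindex.ReindexRequest;

    public final class SlicedReindexSketch {
        public static void main(String[] args) {
            ReindexRequest auto = new ReindexRequest();
            auto.setSourceIndices("source-index");
            auto.setDestIndex("dest-index");
            auto.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES); // resolved from source index characteristics

            ReindexRequest sliced = new ReindexRequest();
            sliced.setSourceIndices("source-index");
            sliced.setDestIndex("dest-index");
            sliced.setSlices(4); // > 1: a leader task creates one subrequest per slice

            System.out.println(auto.getSlices() + " / " + sliced.getSlices());
        }
    }
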

          * If slices are set as {@code "auto"}, this method will resolve that to a specific number based on * characteristics of the source indices. A request with {@code "auto"} slices may end up being sliced or * unsliced. This method does not execute the action. In order to execute the action see diff --git a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java index 9e9d94c8e8fc0..4c8d8aab4532b 100644 --- a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java @@ -113,7 +113,7 @@ public URLRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - super(metadata, false, namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java new file mode 100644 index 0000000000000..c39567a005fd1 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.netty4; + +import org.opensearch.OpenSearchNetty4IntegTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.transport.Netty4BlockingPlugin; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBufUtil; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class Netty4HeaderVerifierIT extends OpenSearchNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(Netty4BlockingPlugin.class); + } + + public void testThatNettyHttpServerRequestBlockedWithHeaderVerifier() throws Exception { + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + final FullHttpRequest blockedRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + blockedRequest.headers().add("blockme", "Not Allowed"); + blockedRequest.headers().add(HOST, "localhost"); + blockedRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + + final List responses = new ArrayList<>(); + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http2()) { + try { + FullHttpResponse blockedResponse = nettyHttpClient.send(transportAddress.address(), blockedRequest); + responses.add(blockedResponse); + String blockedResponseContent = new String(ByteBufUtil.getBytes(blockedResponse.content()), StandardCharsets.UTF_8); + assertThat(blockedResponseContent, containsString("Hit header_verifier")); + assertThat(blockedResponse.status().code(), equalTo(401)); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + +} diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index 6c8ca665424a6..826d4a7e5d61e 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -57,7 +57,7 @@ /** * This test checks that in-flight requests are limited on HTTP level and that requests that are excluded from limiting can pass. - * + *

          * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing * a single node "cluster". */ diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java new file mode 100644 index 0000000000000..d5fe49952add3 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.netty4.Netty4HttpServerTransport; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCountUtil; + +public class Netty4BlockingPlugin extends Netty4ModulePlugin { + + public class Netty4BlockingHttpServerTransport extends Netty4HttpServerTransport { + + public Netty4BlockingHttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer + ) { + super( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + sharedGroupFactory, + tracer + ); + } + + @Override + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + return new ExampleBlockingNetty4HeaderVerifier(); + } + } + + @Override + public Map> getHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.singletonMap( + NETTY_HTTP_TRANSPORT_NAME, + () -> new Netty4BlockingHttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + 
getSharedGroupFactory(settings), + tracer + ) + ); + } + + /** POC for how an external header verifier would be implemented */ + public class ExampleBlockingNetty4HeaderVerifier extends SimpleChannelInboundHandler { + + @Override + public void channelRead0(ChannelHandlerContext ctx, DefaultHttpRequest msg) throws Exception { + ReferenceCountUtil.retain(msg); + if (isBlocked(msg)) { + ByteBuf buf = Unpooled.copiedBuffer("Hit header_verifier".getBytes(StandardCharsets.UTF_8)); + final FullHttpResponse response = new DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.UNAUTHORIZED, buf); + ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); + ReferenceCountUtil.release(msg); + } else { + // Lets the request pass to the next channel handler + ctx.fireChannelRead(msg); + } + } + + private boolean isBlocked(HttpRequest request) { + final boolean shouldBlock = request.headers().contains("blockme"); + + return shouldBlock; + } + } +} diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index a83330356e35e..6475a0b744c60 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -40,6 +40,7 @@ import org.opensearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelPipeline; @@ -98,6 +99,22 @@ public Channel getNettyChannel() { return channel; } + @SuppressWarnings("unchecked") + @Override + public Optional get(String name, Class clazz) { + Object handler = getNettyChannel().pipeline().get(name); + + if (handler == null && inboundPipeline() != null) { + handler = inboundPipeline().get(name); + } + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + @Override public String toString() { return "Netty4HttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java index 7d937157c1034..3c96affb7adf7 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java @@ -258,7 +258,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + *

          * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 0271472125814..1677f333a4b1c 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -334,7 +334,7 @@ public ChannelHandler configureServerChannelHandler() { return new HttpChannelHandler(this, handlingSettings); } - protected static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); + public static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); protected static final AttributeKey HTTP_SERVER_CHANNEL_KEY = AttributeKey.newInstance( "opensearch-http-server-channel" ); @@ -419,8 +419,8 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws E // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP final ChannelPipeline pipeline = ctx.pipeline(); pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); - pipeline.replace(this, "decoder_compress", new HttpContentDecompressor()); - + pipeline.replace(this, "header_verifier", transport.createHeaderVerifier()); + pipeline.addAfter("header_verifier", "decoder_compress", transport.createDecompressor()); pipeline.addAfter("decoder_compress", "aggregator", aggregator); if (handlingSettings.isCompression()) { pipeline.addAfter( @@ -446,7 +446,8 @@ protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); pipeline.addLast("decoder", decoder); - pipeline.addLast("decoder_compress", new HttpContentDecompressor()); + pipeline.addLast("header_verifier", transport.createHeaderVerifier()); + pipeline.addLast("decoder_compress", transport.createDecompressor()); pipeline.addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); @@ -487,13 +488,13 @@ protected void initChannel(Channel childChannel) throws Exception { final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - childChannel.pipeline() .addLast(new LoggingHandler(LogLevel.DEBUG)) .addLast(new Http2StreamFrameToHttpObjectCodec(true)) .addLast("byte_buf_sizer", byteBufSizer) .addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)) - .addLast("decoder_decompress", new HttpContentDecompressor()); + .addLast("header_verifier", transport.createHeaderVerifier()) + .addLast("decoder_decompress", transport.createDecompressor()); if (handlingSettings.isCompression()) { childChannel.pipeline() @@ -531,4 +532,21 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { } } } + + /** + * Extension point that allows a NetworkPlugin to extend the netty pipeline and 
inspect headers after request decoding + */ + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + // pass-through + return new ChannelInboundHandlerAdapter(); + } + + /** + * Extension point that allows a NetworkPlugin to override the default netty HttpContentDecompressor and supply a custom decompressor. + * + * Used in instances to conditionally decompress depending on the outcome from header verification + */ + protected ChannelInboundHandlerAdapter createDecompressor() { + return new HttpContentDecompressor(); + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java index b73f74d187a2a..4bab91565d3ad 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java @@ -65,7 +65,7 @@ * This class is adapted from {@link NioSocketChannel} class in the Netty project. It overrides the channel * read/write behavior to ensure that the bytes are always copied to a thread-local direct bytes buffer. This * happens BEFORE the call to the Java {@link SocketChannel} is issued. - * + *

          * The purpose of this class is to allow the disabling of netty direct buffer pooling while allowing us to * control how bytes end up being copied to direct memory. If we simply disabled netty pooling, we would rely * on the JDK's internal thread local buffer pooling. Instead, this class allows us to create a one thread diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index ca51d70702a82..2bc795d11ed5d 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -96,7 +96,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( NETTY_TRANSPORT_NAME, @@ -108,7 +109,8 @@ public Map> getTransports( pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - getSharedGroupFactory(settings) + getSharedGroupFactory(settings), + tracer ) ); } @@ -142,7 +144,7 @@ public Map> getHttpTransports( ); } - private SharedGroupFactory getSharedGroupFactory(Settings settings) { + SharedGroupFactory getSharedGroupFactory(Settings settings) { SharedGroupFactory groupFactory = this.groupFactory.get(); if (groupFactory != null) { assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided"; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java index 5db1f7c333157..79a5bf9e95121 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java @@ -41,6 +41,7 @@ import org.opensearch.transport.TransportException; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -164,6 +165,18 @@ public void sendMessage(BytesReference reference, ActionListener listener) } } + @SuppressWarnings("unchecked") + @Override + public Optional get(String name, Class clazz) { + final Object handler = getNettyChannel().pipeline().get(name); + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + public Channel getNettyChannel() { return channel; } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java index f77c29c8bfa60..e76a227630dc1 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java @@ -50,6 +50,7 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Netty4NioSocketChannel; import 
org.opensearch.transport.NettyAllocator; @@ -131,9 +132,10 @@ public Netty4Transport( PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, - SharedGroupFactory sharedGroupFactory + SharedGroupFactory sharedGroupFactory, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 3e5f71f1464a1..c92ccba82835f 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -40,6 +40,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; @@ -86,7 +87,8 @@ public void startThreadPool() { recycler, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); nettyTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java index 98a001b8ae4bb..7cca00db68559 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -141,7 +142,8 @@ private TcpTransport startTransport(Settings settings, ThreadPool threadPool) { recycler, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); transport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java index 35b19002dce8d..710b3ff6bd0ca 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java +++ 
b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.AbstractSimpleTransportTestCase; @@ -82,7 +83,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) { @Override diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 035b47c5f388c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0deb3b85eadf831be17b48acab0785fd9d34fc44 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ef410899981ca --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7133d34e92770f59eb28686f4d511b9f3f32e970 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 6ff5a433f0a4e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a204267d68ce4ba36bfddc366cd6865cf5e1378 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..46b83c9e40b3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 @@ -0,0 +1 @@ +be44282e1f6b91a0650fcceb558053d6bdd4863d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index a65ab33a31e2a..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71e8e811f873ba2b47c7ecf9d890cbeac5b6be41 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..36664695a7818 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 @@ -0,0 +1 @@ +bd1f80d33346f7e588685484ef29a304db5190e4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 
04ab7b7e7adb8..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e1274273895365bd83391cc4b79f5264479f5de \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..003ccdf8b0727 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b9ffdc7a52d2087ecb03318ec06305b480cdfe82 \ No newline at end of file diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java index c80d32228feeb..b875fab7d4006 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java @@ -35,13 +35,13 @@ /** * Geänderter Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + *

* The Kölner Phonetik was revised for use in name databases, such as the administration * of a hospital, by Martin Haase (Institut für Sprachwissenschaft, Universität zu Köln) and Kai Heitmann (Institut für * medizinische Statistik, Informatik und Epidemiologie, Köln). * M. Haase und K. Heitmann. Die Erweiterte Kölner Phonetik. 526, 2000. - * + *

          * nach: Martin Wilz, Aspekte der Kodierung phonetischer Ähnlichkeiten * in deutschen Eigennamen, Magisterarbeit. * http://www.uni-koeln.de/phil-fak/phonetik/Lehre/MA-Arbeiten/magister_wilz.pdf diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java index 33e386af9f364..4bcc10ff73b0a 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java @@ -46,13 +46,13 @@ /** * Kölner Phonetik - * + *

          * H.J. Postel, Die Kölner Phonetik. Ein Verfahren zu Identifizierung * von Personennamen auf der Grundlage der Gestaltanalyse. IBM-Nachrichten 19 (1969), 925-931 - * + *

* Algorithm from the Matching Toolbox by Rainer Schnell * Java implementation by Jörg Reiher - * + *

* with modifications by Jörg Prante * */ diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java index 818dbba85e2de..c3237114c65d5 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java @@ -40,7 +40,7 @@ /** * * Taken from commons-codec trunk (unreleased yet) - * + *

          * Encodes a string into a NYSIIS value. NYSIIS is an encoding used to relate * similar names, but can also be used as a general purpose scheme to find word * with similar phonemes. diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index cef3f97d03c51..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e634c8685edad2bdb5c13748b18c0c1a46bb63a3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..e22eaa474016f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f73e2007b133fb699e517ef13b4952844f0150d8 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 3e2dd19a9dd85..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0afdf2afacbae39414ed06325fbb4bed17c07a7d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..1ebe42a2a2f56 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 @@ -0,0 +1 @@ +2c09cbc021a8f81a01600a1d2a999361e70f7aed \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 8c0544acd1ca0..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -166e2ea297182f7bf7070af02aacea9e6a3a19c8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..3c4523d45c0f5 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b054f2c7b11fc7c5601b4c3cdf18aa7508612898 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 
b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index 6bed6564cfd36..02e1ff40f7ed6 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -92,7 +92,8 @@ protected MockTransportService createTransportService() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, writableRegistry(), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public TransportAddress[] addressesFromString(String address) { diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java index 3311ddc8842f2..ce097667f9c4b 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java @@ -77,7 +77,8 @@ protected MockTransportService createTransportService() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ 
b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index b4af9773f33de..2208c78bef67a 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -66,7 +66,6 @@ * compute/v1/projects/[project-id]/zones/[zone] * * By default, project-id is the test method name, lowercase and missing the "test" prefix. - * * For example, if you create a test `myNewAwesomeTest` with following settings: * * Settings nodeSettings = Settings.builder() @@ -75,7 +74,6 @@ * .build(); * * You need to create a file under `src/test/resources/org/opensearch/discovery/gce/` named: - * * compute/v1/projects/mynewawesometest/zones/europe-west1-b/instances.json * */ diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java index 7763b1e42d63e..00b55eb75995c 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java @@ -27,12 +27,12 @@ public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat { /** * the hyper-parameters for constructing HNSW graphs. - * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + * HnswGraph.html */ public static final String METHOD_PARAMETER_M = "m"; /** * the hyper-parameters for constructing HNSW graphs. 
- * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + * HnswGraph.html */ public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction"; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java index 134c6519d7220..3e567d0c04e53 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java @@ -203,6 +203,7 @@ public void testDoToQueryInvalidFieldType() { /** * test serialization of Correlation Query Builder + * @throws Exception throws an IOException if serialization fails * @throws Exception Exception */ public void testSerialization() throws Exception { diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java index 9255d3a2f299e..c8e9d8cfc0a12 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java @@ -53,7 +53,6 @@ public class CustomSuggestion extends Suggest.Suggestion /** * An integer representing the type of the suggestion formerly used for internal serialization over the network. - * * This class is now serialized as a NamedWriteable and this value only remains for backwards compatibility */ public static final int TYPE = 999; @@ -106,7 +105,7 @@ public int getWriteableType() { /** * A meaningless value used to test that plugin suggesters can add fields to their Suggestion types - * + *
<p>
          * This can't be serialized to xcontent because Suggestions appear in xcontent as an array of entries, so there is no place * to add a custom field. But we can still use a custom field internally and use it to define a Suggestion's behavior * diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java index ca58d97377478..ee04741eb5a03 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java @@ -34,7 +34,7 @@ /** * An example of a class to be allowlisted for use by painless scripts - * + *
<p>
          * Each of the members and methods below are allowlisted for use in search scripts. * See example_allowlist.txt. */ diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java index 0833f1f0c6659..35385e98187ea 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java @@ -34,7 +34,7 @@ /** * An example of an instance to be allowlisted for use by painless scripts. - * + *
<p>
          * Each of the members and methods below are allowlisted for use in search scripts but only from this instance. */ public class ExampleAllowlistedInstance { diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java index 428a0cc688ad3..ac42c2b1cf9bc 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java @@ -34,7 +34,7 @@ /** * An example of an annotation to be allowlisted for use by painless scripts - * + *
<p>
          * The annotation below is allowlisted for use in search scripts. * See example_allowlist.txt. */ diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java index 25443977a0f66..2f6d1ad9349bc 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java @@ -34,7 +34,7 @@ /** * An example of a class with static methods to be allowlisted for use by painless scripts - * + *
<p>
          * The method below is allowlisted for use in search scripts. * See example_allowlist.txt. */ diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java index ddfb99e626718..a14215aa7655b 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java @@ -10,13 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.shiro.SecurityUtils; import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; import org.opensearch.common.Randomness; import org.opensearch.identity.IdentityService; import org.opensearch.identity.Subject; -import org.opensearch.identity.noop.NoopSubject; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.BasicAuthToken; import org.opensearch.identity.tokens.OnBehalfOfClaims; @@ -88,20 +86,6 @@ public AuthToken issueServiceAccountToken(String audience) { return token; } - @Override - public Subject authenticateToken(AuthToken authToken) { - return new NoopSubject(); - } - - public boolean validateToken(AuthToken token) { - if (token instanceof BasicAuthToken) { - final BasicAuthToken basicAuthToken = (BasicAuthToken) token; - return basicAuthToken.getUser().equals(SecurityUtils.getSubject().toString()) - && basicAuthToken.getPassword().equals(shiroTokenPasswordMap.get(basicAuthToken)); - } - return false; - } - public String getTokenInfo(AuthToken token) { if (token instanceof BasicAuthToken) { final BasicAuthToken basicAuthToken = (BasicAuthToken) token; diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java index db77ced298991..f99484083e2fb 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java @@ -100,11 +100,6 @@ public void testShouldFailGetTokenInfo() { assertThrows(UnsupportedAuthenticationToken.class, () -> shiroAuthTokenHandler.getTokenInfo(bearerAuthToken)); } - public void testShouldFailValidateToken() { - final BearerAuthToken bearerAuthToken = new BearerAuthToken("header.payload.signature"); - assertFalse(shiroAuthTokenHandler.validateToken(bearerAuthToken)); - } - public void testShoudPassMapLookupWithToken() { final BasicAuthToken authToken = new BasicAuthToken("Basic dGVzdDp0ZTpzdA=="); shiroAuthTokenHandler.getShiroTokenPasswordMap().put(authToken, "te:st"); diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 330a17c02bc7a..8945c09fca28b 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.14.0' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection diff --git a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- 
a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 new file mode 100644 index 0000000000000..33c5cfe53e01d --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 @@ -0,0 +1 @@ +a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 935887261dcc9..952cff96860f2 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -73,9 +73,9 @@ import java.util.regex.Pattern; /** A {@link FieldMapper} for full-text fields with annotation markup e.g. - * + *
<p>
          * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + *
<p>
          * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..dfa4a0fbea94c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index f592ac8312a5d..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d266d079ef33cf93a16b382d64dd15d562df1159 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9bc8c96aec7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +a9fbf4d64b08abed542eefd5f7aed4807edca56f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 deleted file mode 100644 index d06147a0ba646..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30e8fa29a349db5a933225d61891b8802836bb79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..35d9d82202274 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +af3cf676eed30184215426ecf0f0dde15555ea9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 deleted file mode 100644 index 67c3a763d26fa..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a99ecef0e1d86a92e40a7c89805c236d9cd7493e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..0948daa05fff6 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index 60fd706436ae7..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c50f835777ecd4535e15b552b5d9ccb26a2504f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 82364d1b7b3c1..986720ec431fe 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -154,7 +154,7 @@ private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implemen /** * HTTP handler that injects random Azure service errors - * + *
<p>
          * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java index 65852c4fc5bd0..47a5536a6cd8a 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java @@ -47,6 +47,8 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -114,14 +116,7 @@ public AzureRepository( final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super( - metadata, - COMPRESS_SETTING.get(metadata.settings()), - namedXContentRegistry, - clusterService, - recoverySettings, - buildLocation(metadata) - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; @@ -192,4 +187,13 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(Repository.BASE_PATH_SETTING); + restrictedSettings.add(Repository.LOCATION_MODE_SETTING); + return restrictedSettings; + } } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index 9dcc312f8f5a7..b60701ba5e533 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -212,8 +212,8 @@ protected PasswordAuthentication getPasswordAuthentication() { /** * The location mode is not there in v12 APIs anymore but it is possible to mimic its semantics using - * retry options and combination of primary / secondary endpoints. Refer to migration guide for mode details: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * retry options and combination of primary / secondary endpoints. Refer to + * migration guide for mode details: */ private BlobServiceClientBuilder applyLocationMode(final BlobServiceClientBuilder builder, final AzureStorageSettings settings) { final StorageConnectionString storageConnectionString = StorageConnectionString.create(settings.getConnectString(), logger); @@ -335,8 +335,8 @@ private void closeInternally(ClientState state) { } /** - * Implements HTTP pipeline policy to collect statistics on API calls. See please: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * Implements HTTP pipeline policy to collect statistics on API calls. 
See : + * migration guide */ private static class HttpStatsPolicy implements HttpPipelinePolicy { private final BiConsumer statsCollector; diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index c8be30dbaf865..3356e5174592a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -34,16 +34,20 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; +import java.util.List; + import reactor.core.scheduler.Schedulers; import static org.hamcrest.Matchers.is; @@ -179,4 +183,21 @@ public void testChunkSize() { ); } + public void testSystemRepositoryDefault() { + assertThat(azureRepository(Settings.EMPTY).isSystemRepository(), is(false)); + } + + public void testSystemRepositoryOn() { + assertThat(azureRepository(Settings.builder().put("system_repository", true).build()).isSystemRepository(), is(true)); + } + + public void testRestrictedSettingsDefault() { + List> restrictedSettings = azureRepository(Settings.EMPTY).getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.BASE_PATH_SETTING)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.LOCATION_MODE_SETTING)); + } } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 264888bb7da3a..bb0eafc7d1d4a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -451,7 +451,8 @@ private static RequestRetryPolicy requestRetryOptions(BlobServiceClient client) } /** - * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile + * Extract the blob name from a URI like : + * {@code https://myservice.azure.net/container/path/to/myfile } * It should remove the container part (first part of the path) and gives path/to/myfile * @param uri URI to parse * @return The blob name relative to the container diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index ec040a180876e..1bef5146f1db9 100644 --- 
a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -53,14 +53,14 @@ versions << [ dependencies { api 'com.google.api:api-common:1.8.1' - api 'com.google.api:gax:2.32.0' + api 'com.google.api:gax:2.35.0' api 'com.google.api:gax-httpjson:0.103.1' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.25.0' + api 'com.google.api.grpc:proto-google-common-protos:2.25.1' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" @@ -75,8 +75,8 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.2' - api 'com.google.http-client:google-http-client-appengine:1.43.2' + api 'com.google.http-client:google-http-client:1.43.3' + api 'com.google.http-client:google-http-client-appengine:1.43.3' api 'com.google.http-client:google-http-client-gson:1.43.3' api 'com.google.http-client:google-http-client-jackson2:1.43.3' diff --git a/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 deleted file mode 100644 index 9cae74e1c3673..0000000000000 --- a/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522bf3c2a738847b9719eac8ce572be0f84da40a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 new file mode 100644 index 0000000000000..778922c637dc1 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 @@ -0,0 +1 @@ +98d52034cfa6d1b881e16f418894afcfacd89b7a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 deleted file mode 100644 index a576a74c62542..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2520469ebd8c0675f0d2aeafd2da665228320fcf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..800467de8bdf3 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 @@ -0,0 +1 @@ +a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 deleted file mode 100644 index d8a9dba20070b..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fb548c5264227813fd83991b94a705b0841c15f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..4adcca6a55902 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 @@ -0,0 +1 @@ +09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 
100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.0.jar.sha1 deleted file mode 100644 index b5ef7ee78e794..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adfa7c3d9b806969db75cf35fe4b286b3b8b1ce0 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 new file mode 100644 index 0000000000000..cd065dabb8e8a --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 @@ -0,0 +1 @@ +cb90049537b621e39610a110c58ce0b914ee3cc5 \ No newline at end of file diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index f8e1a1cc39ae0..d223f7989c688 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -298,7 +298,7 @@ private static class GoogleCloudStorageBlobStoreHttpHandler extends GoogleCloudS /** * HTTP handler that injects random Google Cloud Storage service errors - * + *
<p>
          * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java index c42cd1802f6e9..f6d078868b875 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -46,6 +46,8 @@ import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -92,14 +94,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super( - metadata, - getSetting(COMPRESS_SETTING, metadata), - namedXContentRegistry, - clusterService, - recoverySettings, - buildLocation(metadata) - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.storageService = storageService; String basePath = BASE_PATH.get(metadata.settings()); @@ -138,6 +133,15 @@ protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET); + restrictedSettings.add(BASE_PATH); + return restrictedSettings; + } + /** * Get a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty. 
*/ diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 445e1d65f3d3e..c9ebb3acaf3e5 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -228,7 +228,7 @@ StorageOptions createStorageOptions( } storageOptionsBuilder.setCredentials(serviceAccountCredentials); } - return storageOptionsBuilder.build(); + return SocketAccess.doPrivilegedException(() -> storageOptionsBuilder.build()); } /** diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 197e772df30d5..35127d6ea4060 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.gcs; +import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.SpecialPermission; import org.opensearch.common.CheckedRunnable; @@ -71,4 +72,16 @@ public static void doPrivilegedVoidIOException(CheckedRunnable acti throw (IOException) e.getCause(); } } + + public static T doPrivilegedException(PrivilegedExceptionAction operation) { + SpecialPermission.check(); + try { + return AccessController.doPrivileged(operation); + } catch (PrivilegedActionException e) { + Throwables.rethrow(e.getCause()); + assert false : "always throws"; + return null; + } + } + } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index e2f5707ca4393..ed1f54888a26f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,7 +66,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.2' + api 'org.apache.avro:avro:1.11.3' api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" @@ -75,7 +75,7 @@ dependencies { api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.9.0' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.14.0' api 'org.apache.commons:commons-lang3:3.13.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' @@ -84,7 +84,7 @@ dependencies { api 'net.minidev:json-smart:2.5.0' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - implementation 'org.codehaus.woodstox:stax2-api:4.2.1' + implementation 'org.codehaus.woodstox:stax2-api:4.2.2' hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager diff --git a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 deleted file mode 100644 index ce1a894e0ce6d..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-97e62e8be2b37e849f1bdb5a4f08121d47cc9806 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 new file mode 100644 index 0000000000000..fb43ecbcf22c9 --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 @@ -0,0 +1 @@ +02b463409b373bff9ece09f54a43d42da5cea55a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 new file mode 100644 index 0000000000000..33c5cfe53e01d --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 @@ -0,0 +1 @@ +a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 deleted file mode 100644 index 800a4aa87ba0e..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ab4f082fd162f60afcaf2b8744a3d959feab3e8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..0e22f98daa61c --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 @@ -0,0 +1 @@ +911fdb5b1a1df36719c579ecc6f2957b88bce1ab \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a9aa34392903e --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +5ef15a3ce29a792b7ad17438e5f84c617b3f2993 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 deleted file mode 100644 index c6fa4cc175222..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afec3c414a0ab7264a66a7572e9e9d3a19a3e0e5 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java index b28d28d76cfde..f0ffec5713c1d 
100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java @@ -83,7 +83,7 @@ public HdfsRepository( final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super(metadata, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 068adc2cc9991..07d1d29eecfc4 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -53,7 +53,7 @@ /** * Oversees all the security specific logic for the HDFS Repository plugin. - * + *
<p>
          * Keeps track of the current user for a given repository, as well as which * permissions to grant the blob store restricted execution methods. */ diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java index 2758bd020e979..856cdf1eb565e 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java @@ -42,7 +42,7 @@ * thread leaks out of the client and is picked up by the test framework. This thread filter is meant * to ignore the offending thread until a version of Hadoop is released that addresses the incorrect * interrupt handling. - * + *
<p>
          * In Hadoop 3.3.6, the org.apache.hadoop.fs.statistics.impl.EvaluatingStatisticsMap uses ForkJoinPool * to perform statistics calculation, leaving dangling workers. * diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at 
end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..5805fdaf411d1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +78489936ca1d91483e34a31d04a3b0812386eb39 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8e40c8826d76d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -795da37ded759e862457a82d9d92c4d39ce8ecee \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 4e8e7720572e8..da2c6e8c1b0ee 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -37,6 +37,8 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -51,10 +53,15 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; +import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; @@ -63,12 +70,18 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import 
java.util.Map; +import java.util.Objects; +import java.util.stream.StreamSupport; import fixture.s3.S3HttpHandler; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -152,6 +165,67 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10735") + @Override + public void testRequestStats() throws Exception { + final String repository = createRepository(randomName()); + final String index = "index-no-merges"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + final long nbDocs = randomLongBetween(10_000L, 20_000L); + try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) { + waitForDocs(nbDocs, indexer); + } + + flushAndRefresh(index); + ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + final String snapshot = "snapshot"; + assertSuccessfulSnapshot( + client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); + + assertAcked(client().admin().indices().prepareDelete(index)); + + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + ensureGreen(index); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get()); + + final RepositoryStats repositoryStats = StreamSupport.stream( + internalCluster().getInstances(RepositoriesService.class).spliterator(), + false + ).map(repositoriesService -> { + try { + return repositoriesService.repository(repository); + } catch (RepositoryMissingException e) { + return null; + } + }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get(); + + Map> extendedStats = repositoryStats.extendedStats; + Map aggregatedStats = new HashMap<>(); + extendedStats.forEach((k, v) -> { + if (k == BlobStore.Metric.RETRY_COUNT || k == BlobStore.Metric.REQUEST_SUCCESS || k == BlobStore.Metric.REQUEST_FAILURE) { + for (Map.Entry entry : v.entrySet()) { + aggregatedStats.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + } + + }); + final Map mockCalls = getMockRequestCounts(); + + String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", aggregatedStats, mockCalls); + + assertEquals(assertionErrorMsg, mockCalls, aggregatedStats); + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. 
*/ @@ -175,7 +249,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override public BlobStore blobStore() { @@ -225,7 +299,7 @@ private void validateAuthHeader(HttpExchange exchange) { /** * HTTP handler that injects random S3 service errors - * + *
<p>
          * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -263,6 +337,8 @@ public void maybeTrack(final String request, Headers requestHeaders) { trackRequest("PutMultipartObject"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutObject"); + } else if (Regex.simpleMatch("POST /*?delete*", request)) { + trackRequest("DeleteObjects"); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java index 0b5fcb6df280e..45170ea1ad209 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java @@ -29,6 +29,7 @@ public class AmazonAsyncS3Reference extends RefCountedReleasable { client.client().close(); client.priorityClient().close(); + client.urgentClient().close(); AwsCredentialsProvider credentials = client.credentials(); if (credentials instanceof Closeable) { try { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java index fa2db83729d25..f8a313b55d945 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java @@ -19,16 +19,19 @@ final class AmazonAsyncS3WithCredentials { private final S3AsyncClient client; private final S3AsyncClient priorityClient; + private final S3AsyncClient urgentClient; private final AwsCredentialsProvider credentials; private AmazonAsyncS3WithCredentials( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { this.client = client; this.credentials = credentials; this.priorityClient = priorityClient; + this.urgentClient = urgentClient; } S3AsyncClient client() { @@ -39,6 +42,10 @@ S3AsyncClient priorityClient() { return priorityClient; } + S3AsyncClient urgentClient() { + return urgentClient; + } + AwsCredentialsProvider credentials() { return credentials; } @@ -46,8 +53,9 @@ AwsCredentialsProvider credentials() { static AmazonAsyncS3WithCredentials create( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { - return new AmazonAsyncS3WithCredentials(client, priorityClient, credentials); + return new AmazonAsyncS3WithCredentials(client, priorityClient, urgentClient, credentials); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 08215ebdd45e0..262304029a0d3 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -103,6 +103,7 @@ public synchronized void refreshAndClearCache(Map clie */ public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer 
normalExecutorBuilder ) { @@ -119,7 +120,7 @@ public AmazonAsyncS3Reference client( return existing; } final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( - buildClient(clientSettings, priorityExecutorBuilder, normalExecutorBuilder) + buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); clientReference.incRef(); clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); @@ -165,6 +166,7 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { // proxy for testing synchronized AmazonAsyncS3WithCredentials buildClient( final S3ClientSettings clientSettings, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -195,6 +197,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( builder.forcePathStyle(true); } + builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.asyncConfiguration( + ClientAsyncConfiguration.builder() + .advancedOption( + SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, + urgentExecutorBuilder.getFutureCompletionExecutor() + ) + .build() + ); + final S3AsyncClient urgentClient = SocketAccess.doPrivileged(builder::build); + builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup())); builder.asyncConfiguration( ClientAsyncConfiguration.builder() @@ -217,7 +230,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient client = SocketAccess.doPrivileged(builder::build); - return AmazonAsyncS3WithCredentials.create(client, priorityClient, credentials); + return AmazonAsyncS3WithCredentials.create(client, priorityClient, urgentClient, credentials); } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 2911a018df337..c1180aab0e0c7 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -113,13 +113,6 @@ class S3BlobContainer extends AbstractBlobContainer implements AsyncMultiStreamB private static final Logger logger = LogManager.getLogger(S3BlobContainer.class); - /** - * Maximum number of deletes in a {@link DeleteObjectsRequest}. - * - * @see S3 Documentation. - */ - private static final int MAX_BULK_DELETES = 1000; - private final S3BlobStore blobStore; private final String keyPath; @@ -202,11 +195,16 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { - S3AsyncClient s3AsyncClient = writeContext.getWritePriority() == WritePriority.HIGH - ? 
amazonS3Reference.get().priorityClient() - : amazonS3Reference.get().client(); + S3AsyncClient s3AsyncClient; + if (writeContext.getWritePriority() == WritePriority.URGENT) { + s3AsyncClient = amazonS3Reference.get().urgentClient(); + } else if (writeContext.getWritePriority() == WritePriority.HIGH) { + s3AsyncClient = amazonS3Reference.get().priorityClient(); + } else { + s3AsyncClient = amazonS3Reference.get().client(); + } CompletableFuture completableFuture = blobStore.getAsyncTransferManager() - .uploadObject(s3AsyncClient, uploadRequest, streamContext); + .uploadObject(s3AsyncClient, uploadRequest, streamContext, blobStore.getStatsMetricPublisher()); completableFuture.whenComplete((response, throwable) -> { if (throwable == null) { completionListener.onResponse(response); @@ -228,34 +226,39 @@ public void readBlobAsync(String blobName, ActionListener listener) try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { final S3AsyncClient s3AsyncClient = amazonS3Reference.get().client(); final String bucketName = blobStore.bucket(); + final String blobKey = buildKey(blobName); - final GetObjectAttributesResponse blobMetadata = getBlobMetadata(s3AsyncClient, bucketName, blobName).get(); + final CompletableFuture blobMetadataFuture = getBlobMetadata(s3AsyncClient, bucketName, blobKey); - final long blobSize = blobMetadata.objectSize(); - final int numberOfParts = blobMetadata.objectParts().totalPartsCount(); - final String blobChecksum = blobMetadata.checksum().checksumCRC32(); - - final List blobPartStreams = new ArrayList<>(); - final List> blobPartInputStreamFutures = new ArrayList<>(); - // S3 multipart files use 1 to n indexing - for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { - blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, partNumber)); - } - - CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)).whenComplete((unused, throwable) -> { - if (throwable == null) { - listener.onResponse( - new ReadContext( - blobSize, - blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), - blobChecksum - ) - ); - } else { + blobMetadataFuture.whenComplete((blobMetadata, throwable) -> { + if (throwable != null) { Exception ex = throwable.getCause() instanceof Exception ? (Exception) throwable.getCause() : new Exception(throwable.getCause()); listener.onFailure(ex); + return; + } + + try { + final List blobPartInputStreamFutures = new ArrayList<>(); + final long blobSize = blobMetadata.objectSize(); + final Integer numberOfParts = blobMetadata.objectParts() == null ? null : blobMetadata.objectParts().totalPartsCount(); + final String blobChecksum = blobMetadata.checksum() == null ? 
null : blobMetadata.checksum().checksumCRC32(); + + if (numberOfParts == null) { + blobPartInputStreamFutures.add(() -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); + } else { + // S3 multipart files use 1 to n indexing + for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { + final int innerPartNumber = partNumber; + blobPartInputStreamFutures.add( + () -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, innerPartNumber) + ); + } + } + listener.onResponse(new ReadContext(blobSize, blobPartInputStreamFutures, blobChecksum)); + } catch (Exception ex) { + listener.onFailure(ex); } }); } catch (Exception ex) { @@ -334,12 +337,12 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx outstanding = new HashSet<>(blobNames); } try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes + // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes final List deleteRequests = new ArrayList<>(); final List partition = new ArrayList<>(); for (String key : outstanding) { partition.add(key); - if (partition.size() == MAX_BULK_DELETES) { + if (partition.size() == blobStore.getBulkDeletesSize()) { deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); partition.clear(); } @@ -386,7 +389,7 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx assert outstanding.isEmpty(); } - private static DeleteObjectsRequest bulkDelete(String bucket, List blobs) { + private DeleteObjectsRequest bulkDelete(String bucket, List blobs) { return DeleteObjectsRequest.builder() .bucket(bucket) .delete( @@ -395,6 +398,7 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List blobs .quiet(true) .build() ) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) .build(); } @@ -685,41 +689,47 @@ static Tuple numberOfMultiparts(final long totalSize, final long par * the stream and its related metadata. * @param s3AsyncClient Async client to be utilized to fetch the object part * @param bucketName Name of the S3 bucket - * @param blobName Identifier of the blob for which the parts will be fetched - * @param partNumber Part number for the blob to be retrieved + * @param blobKey Identifier of the blob for which the parts will be fetched + * @param partNumber Optional part number for the blob to be retrieved * @return A future of {@link InputStreamContainer} containing the stream and stream metadata. 
*/ CompletableFuture getBlobPartInputStreamContainer( S3AsyncClient s3AsyncClient, String bucketName, - String blobName, - int partNumber + String blobKey, + @Nullable Integer partNumber ) { - final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder() - .bucket(bucketName) - .key(blobName) - .partNumber(partNumber); + final boolean isMultipartObject = partNumber != null; + final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder().bucket(bucketName).key(blobKey); + + if (isMultipartObject) { + getObjectRequestBuilder.partNumber(partNumber); + } return SocketAccess.doPrivileged( () -> s3AsyncClient.getObject(getObjectRequestBuilder.build(), AsyncResponseTransformer.toBlockingInputStream()) - .thenApply(S3BlobContainer::transformResponseToInputStreamContainer) + .thenApply(response -> transformResponseToInputStreamContainer(response, isMultipartObject)) ); } /** * Transforms the stream response object from S3 into an {@link InputStreamContainer} * @param streamResponse Response stream object from S3 + * @param isMultipartObject Flag to denote a multipart object response * @return {@link InputStreamContainer} containing the stream and stream metadata */ // Package-Private for testing. - static InputStreamContainer transformResponseToInputStreamContainer(ResponseInputStream streamResponse) { + static InputStreamContainer transformResponseToInputStreamContainer( + ResponseInputStream streamResponse, + boolean isMultipartObject + ) { final GetObjectResponse getObjectResponse = streamResponse.response(); final String contentRange = getObjectResponse.contentRange(); final Long contentLength = getObjectResponse.contentLength(); - if (contentRange == null || contentLength == null) { + if ((isMultipartObject && contentRange == null) || contentLength == null) { throw SdkException.builder().message("Failed to fetch required metadata for blob part").build(); } - final Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()); + final long offset = isMultipartObject ? 
HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()) : 0L; return new InputStreamContainer(streamResponse, getObjectResponse.contentLength(), offset); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 8a5b92d71bb45..e8e043357e126 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -47,9 +47,18 @@ import org.opensearch.repositories.s3.async.AsyncTransferManager; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Locale; import java.util.Map; +import static org.opensearch.repositories.s3.S3Repository.BUCKET_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BUFFER_SIZE_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; +import static org.opensearch.repositories.s3.S3Repository.CANNED_ACL_SETTING; +import static org.opensearch.repositories.s3.S3Repository.SERVER_SIDE_ENCRYPTION_SETTING; +import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; + class S3BlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(S3BlobStore.class); @@ -58,21 +67,24 @@ class S3BlobStore implements BlobStore { private final S3AsyncService s3AsyncService; - private final String bucket; + private volatile String bucket; + + private volatile ByteSizeValue bufferSize; - private final ByteSizeValue bufferSize; + private volatile boolean serverSideEncryption; - private final boolean serverSideEncryption; + private volatile ObjectCannedACL cannedACL; - private final ObjectCannedACL cannedACL; + private volatile StorageClass storageClass; - private final StorageClass storageClass; + private volatile int bulkDeletesSize; - private final RepositoryMetadata repositoryMetadata; + private volatile RepositoryMetadata repositoryMetadata; private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher(); private final AsyncTransferManager asyncTransferManager; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final boolean multipartUploadEnabled; @@ -86,8 +98,10 @@ class S3BlobStore implements BlobStore { ByteSizeValue bufferSize, String cannedACL, String storageClass, + int bulkDeletesSize, RepositoryMetadata repositoryMetadata, AsyncTransferManager asyncTransferManager, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -99,14 +113,23 @@ class S3BlobStore implements BlobStore { this.bufferSize = bufferSize; this.cannedACL = initCannedACL(cannedACL); this.storageClass = initStorageClass(storageClass); + this.bulkDeletesSize = bulkDeletesSize; this.repositoryMetadata = repositoryMetadata; this.asyncTransferManager = asyncTransferManager; this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; + this.urgentExecutorBuilder = urgentExecutorBuilder; } - public boolean isMultipartUploadEnabled() { - return multipartUploadEnabled; + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + this.repositoryMetadata = repositoryMetadata; + this.bucket = 
BUCKET_SETTING.get(repositoryMetadata.settings()); + this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(repositoryMetadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(repositoryMetadata.settings()); + this.cannedACL = initCannedACL(CANNED_ACL_SETTING.get(repositoryMetadata.settings())); + this.storageClass = initStorageClass(STORAGE_CLASS_SETTING.get(repositoryMetadata.settings())); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(repositoryMetadata.settings()); } @Override @@ -119,7 +142,7 @@ public AmazonS3Reference clientReference() { } public AmazonAsyncS3Reference asyncClientReference() { - return s3AsyncService.client(repositoryMetadata, priorityExecutorBuilder, normalExecutorBuilder); + return s3AsyncService.client(repositoryMetadata, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder); } int getMaxRetries() { @@ -138,6 +161,10 @@ public long bufferSizeInBytes() { return bufferSize.getBytes(); } + public int getBulkDeletesSize() { + return bulkDeletesSize; + } + @Override public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); @@ -158,6 +185,16 @@ public Map stats() { return statsMetricPublisher.getStats().toMap(); } + @Override + public Map> extendedStats() { + if (statsMetricPublisher.getExtendedStats() == null || statsMetricPublisher.getExtendedStats().isEmpty()) { + return Collections.emptyMap(); + } + Map> extendedStats = new HashMap<>(); + statsMetricPublisher.getExtendedStats().forEach((k, v) -> extendedStats.put(k, v.toMap())); + return extendedStats; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 933136228b1bb..2392c66329e06 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -446,7 +446,7 @@ S3ClientSettings refine(Settings repositorySettings) { /** * Load all client settings from the given settings. - * + *
<p>
          * Note this will always at least return a client named "default". */ static Map load(final Settings settings, final Path configPath) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 59111e94df22e..728a99b1220a6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -32,6 +32,9 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.StorageClass; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; @@ -41,9 +44,11 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.core.common.settings.SecureString; @@ -62,7 +67,11 @@ import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.threadpool.Scheduler; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -77,7 +86,6 @@ *
<dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write stream (per repository on each node). Defaults to 5.</dd>
 * <dt>{@code chunk_size}</dt>
 * <dd>Large file can be divided into chunks. This parameter specifies the chunk size. Defaults to not chucked.</dd>
 - * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed. Defaults to false.</dd>
          * */ class S3Repository extends MeteredBlobStoreRepository { @@ -182,6 +190,13 @@ class S3Repository extends MeteredBlobStoreRepository { new ByteSizeValue(5, ByteSizeUnit.TB) ); + /** + * Maximum number of deletes in a DeleteObjectsRequest. + * + * @see S3 Documentation. + */ + static final Setting BULK_DELETE_SIZE = Setting.intSetting("bulk_delete_size", 1000, 1, 1000); + /** * Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, * standard_ia, onezone_ia and intelligent_tiering. Defaults to standard. @@ -203,31 +218,30 @@ class S3Repository extends MeteredBlobStoreRepository { private final S3Service service; - private final String bucket; - - private final ByteSizeValue bufferSize; + private volatile String bucket; - private final ByteSizeValue chunkSize; + private volatile ByteSizeValue bufferSize; - private final BlobPath basePath; + private volatile ByteSizeValue chunkSize; - private final boolean serverSideEncryption; + private volatile BlobPath basePath; - private final String storageClass; + private volatile boolean serverSideEncryption; - private final String cannedACL; - - private final RepositoryMetadata repositoryMetadata; + private volatile String storageClass; + private volatile String cannedACL; private final AsyncTransferManager asyncUploadUtils; private final S3AsyncService s3AsyncService; private final boolean multipartUploadEnabled; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; + private final Path pluginConfigPath; - /** - * Constructs an s3 backed repository - */ + private volatile int bulkDeletesSize; + + // Used by test classes S3Repository( final RepositoryMetadata metadata, final NamedXContentRegistry namedXContentRegistry, @@ -235,82 +249,57 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, final boolean multipartUploadEnabled ) { - super( + this( metadata, - COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, + service, clusterService, recoverySettings, - buildLocation(metadata) + asyncUploadUtils, + urgentExecutorBuilder, + priorityExecutorBuilder, + normalExecutorBuilder, + s3AsyncService, + multipartUploadEnabled, + Path.of("") ); + } + + /** + * Constructs an s3 backed repository + */ + S3Repository( + final RepositoryMetadata metadata, + final NamedXContentRegistry namedXContentRegistry, + final S3Service service, + final ClusterService clusterService, + final RecoverySettings recoverySettings, + final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, + final AsyncExecutorContainer priorityExecutorBuilder, + final AsyncExecutorContainer normalExecutorBuilder, + final S3AsyncService s3AsyncService, + final boolean multipartUploadEnabled, + Path pluginConfigPath + ) { + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.service = service; this.s3AsyncService = s3AsyncService; this.multipartUploadEnabled = multipartUploadEnabled; - - this.repositoryMetadata = metadata; + this.pluginConfigPath = pluginConfigPath; this.asyncUploadUtils = 
asyncUploadUtils; + this.urgentExecutorBuilder = urgentExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.normalExecutorBuilder = normalExecutorBuilder; - // Parse and validate the user's S3 Storage Class setting - this.bucket = BUCKET_SETTING.get(metadata.settings()); - if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); - } - - this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - - // We make sure that chunkSize is bigger or equal than/to bufferSize - if (this.chunkSize.getBytes() < bufferSize.getBytes()) { - throw new RepositoryException( - metadata.name(), - CHUNK_SIZE_SETTING.getKey() - + " (" - + this.chunkSize - + ") can't be lower than " - + BUFFER_SIZE_SETTING.getKey() - + " (" - + bufferSize - + ")." - ); - } - - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - - this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); - - this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); - this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); - - if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - // provided repository settings - deprecationLogger.deprecate( - "s3_repository_secret_settings", - "Using s3 access/secret key from repository settings. Instead " - + "store these in named clients and the opensearch keystore for secure settings." - ); - } - - logger.debug( - "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", - bucket, - chunkSize, - serverSideEncryption, - bufferSize, - cannedACL, - storageClass - ); + validateRepositoryMetadata(metadata); + readRepositoryMetadata(); } private static Map buildLocation(RepositoryMetadata metadata) { @@ -365,14 +354,16 @@ protected S3BlobStore createBlobStore() { bufferSize, cannedACL, storageClass, - repositoryMetadata, + bulkDeletesSize, + metadata, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder ); } - // only use for testing + // only use for testing (S3RepositoryTests) @Override protected BlobStore getBlobStore() { return super.getBlobStore(); @@ -383,11 +374,142 @@ public BlobPath basePath() { return basePath; } + @Override + public boolean isReloadable() { + return true; + } + + @Override + public void reload(RepositoryMetadata newRepositoryMetadata) { + if (isReloadable() == false) { + return; + } + + // Reload configs for S3Repository + super.reload(newRepositoryMetadata); + readRepositoryMetadata(); + + // Reload configs for S3RepositoryPlugin + service.settings(metadata); + s3AsyncService.settings(metadata); + + // Reload configs for S3BlobStore + BlobStore blobStore = getBlobStore(); + blobStore.reload(metadata); + } + + /** + * Reloads the values derived from the Repository Metadata + */ + private void readRepositoryMetadata() { + this.bucket = BUCKET_SETTING.get(metadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + + 
this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); + this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(metadata.settings()); + if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { + // provided repository settings + deprecationLogger.deprecate( + "s3_repository_secret_settings", + "Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the opensearch keystore for secure settings." + ); + } + + logger.debug( + "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", + bucket, + chunkSize, + serverSideEncryption, + bufferSize, + cannedACL, + storageClass + ); + } + + @Override + public void validateMetadata(RepositoryMetadata newRepositoryMetadata) { + super.validateMetadata(newRepositoryMetadata); + validateRepositoryMetadata(newRepositoryMetadata); + } + + private void validateRepositoryMetadata(RepositoryMetadata newRepositoryMetadata) { + Settings settings = newRepositoryMetadata.settings(); + if (BUCKET_SETTING.get(settings) == null) { + throw new RepositoryException(newRepositoryMetadata.name(), "No bucket defined for s3 repository"); + } + + // We make sure that chunkSize is bigger or equal than/to bufferSize + if (CHUNK_SIZE_SETTING.get(settings).getBytes() < BUFFER_SIZE_SETTING.get(settings).getBytes()) { + throw new RepositoryException( + newRepositoryMetadata.name(), + CHUNK_SIZE_SETTING.getKey() + + " (" + + CHUNK_SIZE_SETTING.get(settings) + + ") can't be lower than " + + BUFFER_SIZE_SETTING.getKey() + + " (" + + BUFFER_SIZE_SETTING.get(settings) + + ")." 
+ ); + } + + validateStorageClass(STORAGE_CLASS_SETTING.get(settings)); + validateCannedACL(CANNED_ACL_SETTING.get(settings)); + } + + private static void validateStorageClass(String storageClassStringValue) { + if ((storageClassStringValue == null) || storageClassStringValue.equals("")) { + return; + } + + final StorageClass storageClass = StorageClass.fromValue(storageClassStringValue.toUpperCase(Locale.ENGLISH)); + if (storageClass.equals(StorageClass.GLACIER)) { + throw new BlobStoreException("Glacier storage class is not supported"); + } + + if (storageClass == StorageClass.UNKNOWN_TO_SDK_VERSION) { + throw new BlobStoreException("`" + storageClassStringValue + "` is not a valid S3 Storage Class."); + } + } + + private static void validateCannedACL(String cannedACLStringValue) { + if ((cannedACLStringValue == null) || cannedACLStringValue.equals("")) { + return; + } + + for (final ObjectCannedACL cur : ObjectCannedACL.values()) { + if (cur.toString().equalsIgnoreCase(cannedACLStringValue)) { + return; + } + } + + throw new BlobStoreException("cannedACL is not valid: [" + cannedACLStringValue + "]"); + } + @Override protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET_SETTING); + restrictedSettings.add(BASE_PATH_SETTING); + return restrictedSettings; + } + @Override protected void doClose() { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 6ef60474afe8c..9ed232464d080 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -55,6 +56,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.FixedExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -73,6 +75,9 @@ * A plugin to add a repository type that writes to and from the AWS S3. 
*/ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + + private static final String URGENT_FUTURE_COMPLETION = "urgent_future_completion"; + private static final String URGENT_STREAM_READER = "urgent_stream_reader"; private static final String PRIORITY_FUTURE_COMPLETION = "priority_future_completion"; private static final String PRIORITY_STREAM_READER = "priority_stream_reader"; private static final String FUTURE_COMPLETION = "future_completion"; @@ -83,6 +88,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private final Path configPath; + private AsyncExecutorContainer urgentExecutorBuilder; private AsyncExecutorContainer priorityExecutorBuilder; private AsyncExecutorContainer normalExecutorBuilder; @@ -93,17 +99,25 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { @Override public List> getExecutorBuilders(Settings settings) { List> executorBuilders = new ArrayList<>(); + int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors(settings)); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) + new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION) ); + executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_STREAM_READER, priorityPoolCount(settings), 10_000, PRIORITY_STREAM_READER) + new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) ); + executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION)); - executorBuilders.add(new FixedExecutorBuilder(settings, STREAM_READER, normalPoolCount(settings), 10_000, STREAM_READER)); + executorBuilders.add(new ScalingExecutorBuilder(STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); return executorBuilders; } + static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { + return boundedBy((allocatedProcessors + 1) / 2, 1, 5); + } + S3RepositoryPlugin(final Settings settings, final Path configPath, final S3Service service, final S3AsyncService s3AsyncService) { this.service = Objects.requireNonNull(service, "S3 service must not be null"); this.configPath = configPath; @@ -122,6 +136,10 @@ private static int allocatedProcessors(Settings settings) { return OpenSearchExecutors.allocatedProcessors(settings); } + private static int urgentPoolCount(Settings settings) { + return boundedBy((allocatedProcessors(settings) + 7) / 8, 1, 2); + } + private static int priorityPoolCount(Settings settings) { return boundedBy((allocatedProcessors(settings) + 1) / 2, 2, 4); } @@ -144,8 +162,14 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier ) { + int urgentEventLoopThreads = urgentPoolCount(clusterService.getSettings()); int priorityEventLoopThreads = priorityPoolCount(clusterService.getSettings()); int normalEventLoopThreads = normalPoolCount(clusterService.getSettings()); + this.urgentExecutorBuilder = new AsyncExecutorContainer( + 
threadPool.executor(URGENT_FUTURE_COMPLETION), + threadPool.executor(URGENT_STREAM_READER), + new AsyncTransferEventLoopGroup(urgentEventLoopThreads) + ); this.priorityExecutorBuilder = new AsyncExecutorContainer( threadPool.executor(PRIORITY_FUTURE_COMPLETION), threadPool.executor(PRIORITY_STREAM_READER), @@ -170,7 +194,8 @@ protected S3Repository createRepository( AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), normalExecutorBuilder.getStreamReader(), - priorityExecutorBuilder.getStreamReader() + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader() ); return new S3Repository( metadata, @@ -179,10 +204,12 @@ protected S3Repository createRepository( clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, - S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()) + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()), + configPath ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 3a35f6135f28b..d7e47e0ab1bcc 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -54,7 +54,7 @@ * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. - * + *
<p>
          * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ class S3RetryingInputStream extends InputStream { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index b13672b4179f8..b1b3e19eac275 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -90,6 +90,7 @@ import java.security.SecureRandom; import java.time.Duration; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import static java.util.Collections.emptyMap; @@ -100,7 +101,7 @@ class S3Service implements Closeable { private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com"; - private volatile Map clientsCache = emptyMap(); + private volatile Map clientsCache = new ConcurrentHashMap<>(); /** * Client settings calculated from static configuration and settings in the keystore. @@ -111,7 +112,7 @@ class S3Service implements Closeable { * Client settings derived from those in {@link #staticClientSettings} by combining them with settings * in the {@link RepositoryMetadata}. */ - private volatile Map derivedClientSettings = emptyMap(); + private volatile Map derivedClientSettings = new ConcurrentHashMap<>(); S3Service(final Path configPath) { staticClientSettings = MapBuilder.newMapBuilder() diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index cad0037f99249..0c63bfdb1ff97 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -8,10 +8,13 @@ package org.opensearch.repositories.s3; -import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.MetricRecord; +import org.opensearch.common.blobstore.BlobStore; + +import java.time.Duration; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @@ -20,18 +23,67 @@ public class StatsMetricPublisher { private final Stats stats = new Stats(); + private final Map extendedStats = new HashMap<>() { + { + put(BlobStore.Metric.REQUEST_LATENCY, new Stats()); + put(BlobStore.Metric.REQUEST_SUCCESS, new Stats()); + put(BlobStore.Metric.REQUEST_FAILURE, new Stats()); + put(BlobStore.Metric.RETRY_COUNT, new Stats()); + } + }; + public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.listCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).listMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).listMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case 
"ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).listMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).listMetrics.addAndGet(1); + } + stats.listMetrics.addAndGet(1); + break; + } + } + } + + @Override + public void close() {} + }; + + public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) { + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).deleteMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).deleteMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).deleteMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).deleteMetrics.addAndGet(1); + } + stats.deleteMetrics.addAndGet(1); + break; + } + } } @Override @@ -41,15 +93,26 @@ public void close() {} public MetricPublisher getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.getCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).getMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).getMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).getMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).getMetrics.addAndGet(1); + } + stats.getMetrics.addAndGet(1); + break; + } + } } @Override @@ -59,15 +122,26 @@ public void close() {} public MetricPublisher putObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.putCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).putMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).putMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).putMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).putMetrics.addAndGet(1); + } + stats.putMetrics.addAndGet(1); + break; + } + } } @Override @@ -77,15 +151,26 @@ public void close() {} public MetricPublisher multipartUploadMetricCollector = new 
MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.postCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + for (MetricRecord metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).multiPartPutMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).multiPartPutMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).multiPartPutMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).multiPartPutMetrics.addAndGet(1); + } + stats.multiPartPutMetrics.addAndGet(1); + break; + } + } } @Override @@ -96,22 +181,29 @@ public Stats getStats() { return stats; } + public Map getExtendedStats() { + return extendedStats; + } + static class Stats { - final AtomicLong listCount = new AtomicLong(); + final AtomicLong listMetrics = new AtomicLong(); + + final AtomicLong getMetrics = new AtomicLong(); - final AtomicLong getCount = new AtomicLong(); + final AtomicLong putMetrics = new AtomicLong(); - final AtomicLong putCount = new AtomicLong(); + final AtomicLong deleteMetrics = new AtomicLong(); - final AtomicLong postCount = new AtomicLong(); + final AtomicLong multiPartPutMetrics = new AtomicLong(); Map toMap() { final Map results = new HashMap<>(); - results.put("GetObject", getCount.get()); - results.put("ListObjects", listCount.get()); - results.put("PutObject", putCount.get()); - results.put("PutMultipartObject", postCount.get()); + results.put("GetObject", getMetrics.get()); + results.put("ListObjects", listMetrics.get()); + results.put("PutObject", putMetrics.get()); + results.put("DeleteObjects", deleteMetrics.get()); + results.put("PutMultipartObject", multiPartPutMetrics.get()); return results; } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index ad6939ce299d6..933ee6dc29513 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -23,10 +23,13 @@ import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; import org.opensearch.repositories.s3.io.CheckedContainer; +import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -45,6 +48,7 @@ public class AsyncPartsHandler { * @param s3AsyncClient S3 client to use for upload * @param executorService Thread pool for regular upload * @param priorityExecutorService Thread pool for priority uploads + * @param urgentExecutorService Thread pool for urgent uploads * @param uploadRequest request for upload * @param streamContext Stream 
context used in supplying individual file parts * @param uploadId Upload Id against which multi-part is being performed @@ -57,6 +61,7 @@ public static List> uploadParts( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, UploadRequest uploadRequest, StreamContext streamContext, String uploadId, @@ -80,6 +85,7 @@ public static List> uploadParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, completedParts, inputStreamContainers, futures, @@ -126,6 +132,7 @@ private static void uploadPart( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, AtomicReferenceArray completedParts, AtomicReferenceArray inputStreamContainers, List> futures, @@ -135,29 +142,47 @@ private static void uploadPart( ) { Integer partNumber = uploadPartRequest.partNumber(); - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); CompletableFuture uploadPartResponseFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.uploadPart( uploadPartRequest, - AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ) ); - CompletableFuture convertFuture = uploadPartResponseFuture.thenApply( - uploadPartResponse -> convertUploadPartResponse( - completedParts, - inputStreamContainers, - uploadPartResponse, - partNumber, - uploadRequest.doRemoteDataIntegrityCheck() - ) - ); + CompletableFuture convertFuture = uploadPartResponseFuture.whenComplete((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException ex) { + log.error( + () -> new ParameterizedMessage( + "Failed to close stream while uploading a part of idx {} and file {}.", + uploadPartRequest.partNumber(), + uploadPartRequest.key() + ), + ex + ); + } + }) + .thenApply( + uploadPartResponse -> convertUploadPartResponse( + completedParts, + inputStreamContainers, + uploadPartResponse, + partNumber, + uploadRequest.doRemoteDataIntegrityCheck() + ) + ); futures.add(convertFuture); CompletableFutureUtils.forwardExceptionTo(convertFuture, uploadPartResponseFuture); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index 8d45c2167a3d1..4f1ab9764702e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -35,9 +35,12 @@ import 
org.opensearch.common.util.ByteUtils; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.repositories.s3.io.CheckedContainer; +import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import java.util.Base64; import java.util.List; @@ -58,6 +61,7 @@ public final class AsyncTransferManager { private static final Logger log = LogManager.getLogger(AsyncTransferManager.class); private final ExecutorService executorService; private final ExecutorService priorityExecutorService; + private final ExecutorService urgentExecutorService; private final long minimumPartSize; /** @@ -72,10 +76,16 @@ public final class AsyncTransferManager { * @param executorService The stream reader {@link ExecutorService} for normal priority uploads * @param priorityExecutorService The stream read {@link ExecutorService} for high priority uploads */ - public AsyncTransferManager(long minimumPartSize, ExecutorService executorService, ExecutorService priorityExecutorService) { + public AsyncTransferManager( + long minimumPartSize, + ExecutorService executorService, + ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService + ) { this.executorService = executorService; this.priorityExecutorService = priorityExecutorService; this.minimumPartSize = minimumPartSize; + this.urgentExecutorService = urgentExecutorService; } /** @@ -86,16 +96,21 @@ public AsyncTransferManager(long minimumPartSize, ExecutorService executorServic * @param streamContext The {@link StreamContext} to supply streams during upload * @return A {@link CompletableFuture} to listen for upload completion */ - public CompletableFuture uploadObject(S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext) { + public CompletableFuture uploadObject( + S3AsyncClient s3AsyncClient, + UploadRequest uploadRequest, + StreamContext streamContext, + StatsMetricPublisher statsMetricPublisher + ) { CompletableFuture returnFuture = new CompletableFuture<>(); try { if (streamContext.getNumberOfParts() == 1) { log.debug(() -> "Starting the upload as a single upload part request"); - uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture); + uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture, statsMetricPublisher); } else { log.debug(() -> "Starting the upload as multipart upload request"); - uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture); + uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, statsMetricPublisher); } } catch (Throwable throwable) { returnFuture.completeExceptionally(throwable); @@ -108,12 +123,14 @@ private void uploadInParts( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext, - CompletableFuture returnFuture + CompletableFuture returnFuture, + StatsMetricPublisher statsMetricPublisher ) { CreateMultipartUploadRequest.Builder createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder() .bucket(uploadRequest.getBucket()) - .key(uploadRequest.getKey()); + .key(uploadRequest.getKey()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)); if (uploadRequest.doRemoteDataIntegrityCheck()) { createMultipartUploadRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); } @@ -152,6 +169,7 @@ 
private void doUploadInParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, uploadRequest, streamContext, uploadId, @@ -286,28 +304,42 @@ private void uploadInOneChunk( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, InputStreamContainer inputStreamContainer, - CompletableFuture returnFuture + CompletableFuture returnFuture, + StatsMetricPublisher statsMetricPublisher ) { PutObjectRequest.Builder putObjectRequestBuilder = PutObjectRequest.builder() .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) - .contentLength(uploadRequest.getContentLength()); + .contentLength(uploadRequest.getContentLength()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.putObjectMetricPublisher)); if (uploadRequest.doRemoteDataIntegrityCheck()) { putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum())); } - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } + // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered + // data can be retried instead of retrying whole file by the application. + InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); CompletableFuture putObjectFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), - AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ).handle((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException e) { + log.error( + () -> new ParameterizedMessage("Failed to close stream while uploading single file {}.", uploadRequest.getKey()), + e + ); + } if (throwable != null) { Throwable unwrappedThrowable = ExceptionsHelper.unwrap(throwable, S3Exception.class); if (unwrappedThrowable != null) { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index a4bfe11383b4f..8e1926d40302f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -302,7 +302,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git 
a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java index e9fe557ab751a..de9ad46bb222d 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java @@ -44,12 +44,12 @@ public void testCachedClientsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); @@ -79,12 +79,12 @@ public void testCachedClientsWithCredentialsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 8c8524212e08e..7c67519f2f3b0 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -64,6 +64,7 @@ import org.mockito.invocation.InvocationOnMock; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -265,10 +266,11 @@ public void verifySingleChunkUploadCallCount(boolean finalizeUploadFailure) { @Override public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { - return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, null)); + return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, asyncClient, null)); } } @@ -387,13 +389,16 @@ private S3BlobStore createBlobStore() { 
S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY), S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + BULK_DELETE_SIZE.get(Settings.EMPTY), repositoryMetadata, new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index ecad68474b601..ceab06bd051e9 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -95,6 +95,7 @@ import static org.opensearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING; import static org.opensearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING; import static org.opensearch.repositories.s3.S3ClientSettings.REGION; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -215,13 +216,16 @@ protected AsyncMultiStreamBlobContainer createBlobContainer( bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + BULK_DELETE_SIZE.get(Settings.EMPTY), repositoryMetadata, new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ) ) { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index a87c060dcc60a..58ad290a31e85 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -81,7 +81,6 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeUnit; -import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -100,7 +99,6 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -278,10 +276,12 @@ public void testDelete() throws IOException { final String bucketName = randomAlphaOfLengthBetween(1, 10); final BlobPath blobPath = new BlobPath(); + int bulkDeleteSize = 5; final S3BlobStore blobStore = 
mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); final S3Client client = mock(S3Client.class); doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); @@ -299,8 +299,11 @@ public void testDelete() throws IOException { when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); final List keysDeleted = new ArrayList<>(); + AtomicInteger deleteCount = new AtomicInteger(); doAnswer(invocation -> { DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); + deleteCount.getAndIncrement(); + logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size()); keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList())); return DeleteObjectsResponse.builder().build(); }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); @@ -313,6 +316,8 @@ public void testDelete() throws IOException { // keysDeleted will have blobPath also assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); assertTrue(keysDeleted.contains(blobPath.buildAsString())); + // keysDeleted will have blobPath also + assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get()); keysDeleted.remove(blobPath.buildAsString()); assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); } @@ -919,7 +924,7 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberO testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } - public void testReadBlobAsync() throws Exception { + public void testReadBlobAsyncMultiPart() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); final String checksum = randomAlphaOfLength(10); @@ -930,13 +935,9 @@ public void testReadBlobAsync() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) - ); - final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( - 10000L, - mock(ExecutorService.class), - mock(ExecutorService.class) + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); + final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -944,7 +945,6 @@ public void testReadBlobAsync() throws Exception { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.serverSideEncryption()).thenReturn(false); when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); - when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); getObjectAttributesResponseCompletableFuture.complete( @@ -976,7 +976,7 @@ public void testReadBlobAsync() throws Exception { assertEquals(objectSize, readContext.getBlobSize()); for (int partNumber = 1; partNumber < objectPartCount; partNumber++) { - InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber); + 
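Illustrative note, not part of the change set above: the new deleteCount assertion in testDelete checks that keys are removed in batches of the configured bulk-delete size, i.e. the expected number of DeleteObjects calls for N keys with batch size B is ceil(N / B). A tiny standalone arithmetic sketch (the key count is hypothetical; the batch size matches the 5 used in the test):

    public class BulkDeleteBatchCountSketch {
        public static void main(String[] args) {
            int totalKeys = 12;       // hypothetical number of keys to delete
            int bulkDeleteSize = 5;   // batch size, as in the test above
            int expectedBatches = (totalKeys + bulkDeleteSize - 1) / bulkDeleteSize;  // integer ceil
            System.out.println(expectedBatches);  // prints 3 (batches of 5, 5 and 2)
        }
    }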
InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber).get().join(); final int offset = partNumber * partSize; assertEquals(partSize, inputStreamContainer.getContentLength()); assertEquals(offset, inputStreamContainer.getOffset()); @@ -984,6 +984,60 @@ public void testReadBlobAsync() throws Exception { } } + public void testReadBlobAsyncSinglePart() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final int objectSize = 100; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) + ); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize((long) objectSize) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + getObjectAttributesResponseCompletableFuture + ); + + mockObjectResponse(s3AsyncClient, bucketName, blobName, objectSize); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(1, readContextActionListener.getResponseCount()); + assertEquals(0, readContextActionListener.getFailureCount()); + ReadContext readContext = readContextActionListener.getResponse(); + assertEquals(1, readContext.getNumberOfParts()); + assertEquals(checksum, readContext.getBlobChecksum()); + assertEquals(objectSize, readContext.getBlobSize()); + + InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get().get().join(); + assertEquals(objectSize, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length); + + } + public void testReadBlobAsyncFailure() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); @@ -994,13 +1048,9 @@ public void testReadBlobAsyncFailure() throws Exception { final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( - AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) - ); - final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( - 10000L, - mock(ExecutorService.class), - mock(ExecutorService.class) + 
AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) ); + final S3BlobStore blobStore = mock(S3BlobStore.class); final BlobPath blobPath = new BlobPath(); @@ -1008,7 +1058,6 @@ public void testReadBlobAsyncFailure() throws Exception { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.serverSideEncryption()).thenReturn(false); when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); - when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); getObjectAttributesResponseCompletableFuture.complete( @@ -1032,6 +1081,51 @@ public void testReadBlobAsyncFailure() throws Exception { assertEquals(1, readContextActionListener.getFailureCount()); } + public void testReadBlobAsyncOnCompleteFailureMissingData() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final long objectSize = 100L; + final int objectPartCount = 10; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) + ); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().build()) + .objectSize(null) + .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build()) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + getObjectAttributesResponseCompletableFuture + ); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(0, readContextActionListener.getResponseCount()); + assertEquals(1, readContextActionListener.getFailureCount()); + } + public void testGetBlobMetadata() throws Exception { final String checksum = randomAlphaOfLengthBetween(1, 10); final long objectSize = 100L; @@ -1071,7 +1165,7 @@ public void testGetBlobPartInputStream() throws Exception { final String blobName = randomAlphaOfLengthBetween(1, 10); final String bucketName = randomAlphaOfLengthBetween(1, 10); final long contentLength = 10L; - final String contentRange = "bytes 0-10/100"; + final String contentRange = "bytes 10-20/100"; final InputStream inputStream = ResponseInputStream.nullInputStream(); final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); @@ -1095,9 +1189,17 @@ public void testGetBlobPartInputStream() throws Exception { ) 
).thenReturn(getObjectPartResponse); + // Header based offset in case of a multi part object request InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0) .get(); + assertEquals(10, inputStreamContainer.getOffset()); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); + + // 0 offset in case of a single part object request + inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, null).get(); + assertEquals(0, inputStreamContainer.getOffset()); assertEquals(contentLength, inputStreamContainer.getContentLength()); assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); @@ -1108,28 +1210,65 @@ public void testTransformResponseToInputStreamContainer() throws Exception { final long contentLength = 10L; final InputStream inputStream = ResponseInputStream.nullInputStream(); - final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); - GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build(); + // Exception when content range absent for multipart object ResponseInputStream responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream); - assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange)); + assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange, true)); + // No exception when content range absent for single part object + ResponseInputStream responseInputStreamNoRangeSinglePart = new ResponseInputStream<>( + getObjectResponse, + inputStream + ); + InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer( + responseInputStreamNoRangeSinglePart, + false + ); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + + // Exception when length is absent getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build(); ResponseInputStream responseInputStreamNoContentLength = new ResponseInputStream<>( getObjectResponse, inputStream ); - assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength)); + assertThrows( + SdkException.class, + () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength, true) + ); + // No exception when range and length both are present getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build(); ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); - InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream); + inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream, true); assertEquals(contentLength, inputStreamContainer.getContentLength()); assertEquals(0, inputStreamContainer.getOffset()); assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); } + private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) { + + final InputStream inputStream = new 
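Illustrative note, not part of the change set above: the updated getBlobPartInputStreamContainer and transformResponseToInputStreamContainer tests expect the part offset to come from the HTTP Content-Range header ("bytes 10-20/100" yields offset 10) for ranged multipart reads, and offset 0 when no part number or range is involved. This standalone sketch only illustrates the header format the tests exercise; it is not the plugin's actual parser:

    public class ContentRangeOffsetSketch {
        // Returns the first byte position of a Content-Range value, or 0 when the header is absent.
        static long startOffset(String contentRange) {
            if (contentRange == null) {
                return 0L;  // single-part object: whole body, offset 0
            }
            // "bytes 10-20/100" -> "10-20/100" -> "10"
            String spec = contentRange.substring("bytes ".length());
            return Long.parseLong(spec.substring(0, spec.indexOf('-')));
        }

        public static void main(String[] args) {
            System.out.println(startOffset("bytes 10-20/100"));  // prints 10
            System.out.println(startOffset(null));               // prints 0
        }
    }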
ByteArrayInputStream(randomByteArrayOfLength(objectSize)); + + GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength((long) objectSize).build(); + + CompletableFuture> getObjectPartResponse = new CompletableFuture<>(); + ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); + getObjectPartResponse.complete(responseInputStream); + + GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).build(); + + when( + s3AsyncClient.getObject( + eq(getObjectRequest), + ArgumentMatchers.>>any() + ) + ).thenReturn(getObjectPartResponse); + + } + private void mockObjectPartResponse( S3AsyncClient s3AsyncClient, String bucketName, diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index 533c3aa17009d..6fec535ae6301 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -36,17 +36,20 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; import java.nio.file.Path; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -122,7 +125,8 @@ public void testBasePathSetting() { } public void testDefaultBufferSize() { - final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + Settings settings = Settings.builder().build(); + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", settings); try (S3Repository s3repo = createS3Repo(metadata)) { assertThat(s3repo.getBlobStore(), is(nullValue())); s3repo.start(); @@ -133,6 +137,26 @@ public void testDefaultBufferSize() { } } + public void testIsReloadable() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + assertTrue(s3repo.isReloadable()); + } + } + + public void testRestrictedSettingsDefault() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + List> restrictedSettings = s3repo.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(S3Repository.BUCKET_SETTING)); + assertTrue(restrictedSettings.contains(S3Repository.BASE_PATH_SETTING)); + } + } + private S3Repository 
createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, @@ -144,6 +168,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { null, null, null, + null, false ) { @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 9c07b929052bc..2437547a80a6f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -33,12 +33,18 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.blobstore.ZeroInputStream; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -58,6 +64,7 @@ public void setUp() throws Exception { asyncTransferManager = new AsyncTransferManager( ByteSizeUnit.MB.toBytes(5), Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor() ); super.setUp(); @@ -70,17 +77,17 @@ public void testOneChunkUpload() { putObjectResponseCompletableFuture ); + AtomicReference streamRef = new AtomicReference<>(); CompletableFuture resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing }, false, null), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 1 - ) + new StreamContext((partIdx, partSize, position) -> { + streamRef.set(new ZeroInputStream(partSize)); + return new InputStreamContainer(streamRef.get(), partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1), + new StatsMetricPublisher() ); try { @@ -90,6 +97,14 @@ public void testOneChunkUpload() { } verify(s3AsyncClient, times(1)).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class)); + + boolean closeError = false; + try { + streamRef.get().available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); } public void testOneChunkUploadCorruption() { @@ -118,7 +133,8 @@ public void testOneChunkUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1 - ) + ), + new StatsMetricPublisher() ); try { @@ -159,17 +175,18 @@ public void testMultipartUpload() { abortMultipartUploadResponseCompletableFuture ); + List streams = new ArrayList<>(); CompletableFuture resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing }, true, 3376132981L), - new StreamContext( - (partIdx, partSize, position) -> new 
InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 5 - ) + new StreamContext((partIdx, partSize, position) -> { + InputStream stream = new ZeroInputStream(partSize); + streams.add(stream); + return new InputStreamContainer(stream, partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5), + new StatsMetricPublisher() ); try { @@ -178,6 +195,16 @@ public void testMultipartUpload() { fail("did not expect resultFuture to fail"); } + streams.forEach(stream -> { + boolean closeError = false; + try { + stream.available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); + }); + verify(s3AsyncClient, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); verify(s3AsyncClient, times(5)).uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class)); verify(s3AsyncClient, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -219,7 +246,8 @@ public void testMultipartUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5 - ) + ), + new StatsMetricPublisher() ); try { diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java index 2cac58262c75a..e1655cc5e0784 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java @@ -47,9 +47,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. */ } } diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java index 7390759029dfc..6f821147c3079 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java @@ -26,9 +26,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. 
*/ } } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 45c9f522c09d8..f5c367cb7643b 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -28,7 +28,7 @@ dependencies { api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" - api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" + api "io.opentelemetry.semconv:opentelemetry-semconv:${versions.opentelemetrysemconv}" api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}" @@ -37,6 +37,7 @@ dependencies { runtimeOnly "com.squareup.okhttp3:okhttp:4.11.0" runtimeOnly "com.squareup.okio:okio-jvm:3.5.0" runtimeOnly "io.opentelemetry:opentelemetry-exporter-sender-okhttp:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-extension-incubator:${versions.opentelemetry}-alpha" testImplementation "io.opentelemetry:opentelemetry-sdk-testing:${versions.opentelemetry}" } @@ -80,29 +81,12 @@ thirdPartyAudit { 'io.opentelemetry.api.events.EventEmitter', 'io.opentelemetry.api.events.EventEmitterBuilder', 'io.opentelemetry.api.events.EventEmitterProvider', - 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleHistogramBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedLongHistogramBuilder', 'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties', 'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.extension.incubator.metrics.DoubleCounterAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.DoubleGauge', - 'io.opentelemetry.extension.incubator.metrics.DoubleGaugeAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.DoubleHistogramAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.DoubleUpDownCounterAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleCounterBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleGaugeBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleUpDownCounterBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedLongCounterBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedLongGaugeBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedLongUpDownCounterBuilder', - 'io.opentelemetry.extension.incubator.metrics.LongCounterAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.LongGauge', - 'io.opentelemetry.extension.incubator.metrics.LongGaugeAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.LongHistogramAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.LongUpDownCounterAdviceConfigurer', - 'kotlin.io.path.PathsKt' + 'kotlin.io.path.PathsKt', + 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider' ) } diff --git a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties index 544f42bd5513b..8dec1119eec66 100644 --- 
a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties +++ b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties @@ -25,3 +25,23 @@ logger.exporter.name = io.opentelemetry.exporter.logging.LoggingSpanExporter logger.exporter.level = INFO logger.exporter.appenderRef.tracing.ref = tracing logger.exporter.additivity = false + + +appender.metrics.type = RollingFile +appender.metrics.name = metrics +appender.metrics.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics.log +appender.metrics.filePermissions = rw-r----- +appender.metrics.layout.type = PatternLayout +appender.metrics.layout.pattern = %m%n +appender.metrics.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics-%i.log.gz +appender.metrics.policies.type = Policies +appender.metrics.policies.size.type = SizeBasedTriggeringPolicy +appender.metrics.policies.size.size = 1GB +appender.metrics.strategy.type = DefaultRolloverStrategy +appender.metrics.strategy.max = 4 + + +logger.metrics_exporter.name = io.opentelemetry.exporter.logging.LoggingMetricExporter +logger.metrics_exporter.level = INFO +logger.metrics_exporter.appenderRef.tracing.ref = metrics +logger.metrics_exporter.additivity = false diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 deleted file mode 100644 index b0ce00e191830..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a32dfbd7f01de6711fd0e970f8d4b4c0405056d6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..eae141a8d1a23 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 @@ -0,0 +1 @@ +bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 deleted file mode 100644 index 84cb60a2f7acb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f665ff01ce6b964cdf0b8cb5cd1c196dfe94ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..6e42973adc581 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 deleted file mode 100644 index eccb15f7b7c8e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f299d336dba1039478497f37b273dfa764c6faef \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..b119468e7f88b --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 deleted file mode 100644 index 40537a399ab14..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f1a09e89955e6145babf8bcdf80c95174eb817 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8f653922d6418 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 @@ -0,0 +1 @@ +260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 deleted file mode 100644 index e88b7514ee54d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -15692246539571c41180aff2b55abe527b939a7b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..103da4720de96 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 deleted file mode 100644 index 86937743208c6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -947cf43a6411c4a323e14594431040a476ad43e8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..3db07532ceea9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 deleted file mode 100644 index 068926277253c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f3a14515500e4df260ce7b10a668237a95ac791 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..10d9b7cdfe3e3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..162890965a6eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 @@ -0,0 +1 @@ +6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 deleted file mode 100644 index d425ed61cc4cd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d15a9ea26e8e6ea93287a9f4ee02d91e5a74392 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..d6ce31a31cc6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 @@ -0,0 +1 @@ +2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 deleted file mode 100644 index 6b32d98b0f7c7..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e437ba87004bb63069d04fb06beae65b98dd13a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8a6a9705d836d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 deleted file mode 100644 index 13ef6de11e82d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5985d0950746ad12b49cc42c063f26ddfbcaaacb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..37d79f5c573f7 --- 
/dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 deleted file mode 100644 index fc5aad9c9011e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b12825541c5dae52a0fb35045c1b36df3ca8f632 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..80179e4808f50 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 @@ -0,0 +1 @@ +47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 deleted file mode 100644 index ac522b765da05..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c5531fbc44178a7bcfeb7021ae80e70a7c43458 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..fd917a58ba77c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..77b12c99464f6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 @@ -0,0 +1 @@ +207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 deleted file mode 100644 index 089a2484dd1d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e8f7a97a4896a81846553275b9d61885be7ef50 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java similarity index 85% rename from plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java rename to plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java index ed4d13f3abb7d..45caf8bf5f60b 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java @@ -6,12 +6,9 @@ * compatible open source license. 
*/ -package org.opensearch.telemetry.tracing; +package org.opensearch.telemetry; import org.opensearch.common.settings.Settings; -import org.opensearch.telemetry.OTelTelemetryPlugin; -import org.opensearch.telemetry.Telemetry; -import org.opensearch.telemetry.TelemetrySettings; import java.util.Optional; diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java new file mode 100644 index 0000000000000..74fc872cb30e3 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.util.Collection; +import java.util.List; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricExporter; + +public class InMemorySingletonMetricsExporter implements MetricExporter { + + public static final InMemorySingletonMetricsExporter INSTANCE = new InMemorySingletonMetricsExporter(InMemoryMetricExporter.create()); + + private static InMemoryMetricExporter delegate; + + public static InMemorySingletonMetricsExporter create() { + return INSTANCE; + } + + private InMemorySingletonMetricsExporter(InMemoryMetricExporter delegate) { + InMemorySingletonMetricsExporter.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection metrics) { + return delegate.export(metrics); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + public List getFinishedMetricItems() { + return delegate.getFinishedMetricItems(); + } + + /** + * Clears the state. + */ + public void reset() { + delegate.reset(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java new file mode 100644 index 0000000000000..bcdcb657c4f42 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 1) +public class TelemetryMetricsDisabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityChecksWhenMetricsDisabled() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + + Thread.sleep(2000); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(counter instanceof NoopCounter); + } + +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java new file mode 100644 index 0000000000000..ed341595d327d --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
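TelemetryMetricsDisabledSanityIT above pins down the contract for the disabled path: with TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING set to false, the registry resolves to NoopMetricsRegistry and its counters to NoopCounter. A small sketch of what calling code can therefore rely on; the class name and metric name are illustrative only, not part of this change:

import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.MetricsRegistry;

final class DisabledMetricsSketch {
    static void record(MetricsRegistry registry) {
        // When metrics are disabled, "registry" is a NoopMetricsRegistry, so the counter
        // below is a NoopCounter and add() is a harmless no-op; callers need no
        // feature-flag checks of their own.
        Counter counter = registry.createCounter("sketch.requests", "Illustrative request counter", "1");
        counter.add(1.0);
    }
}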
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; + +import io.opentelemetry.sdk.metrics.data.DoublePointData; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) +public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), true) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testCounter() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(1.0, value, 0.0); + } + + public void testUpDownCounter() throws Exception { + + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createUpDownCounter("test-up-down-counter", "test", "1"); + counter.add(1.0); + counter.add(-2.0); + // Sleep for about 2s to wait for metrics to be published. 
+ Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-up-down-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(-1.0, value, 0.0); + } + + @After + public void reset() { + InMemorySingletonMetricsExporter.INSTANCE.reset(); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java index 949a58f6cab41..45ed140e1be94 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index 8a49a0abf5512..f07f2b308e801 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.tracing.attributes.Attributes; @@ -88,9 +89,7 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { ); InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; - if (!exporter.getFinishedSpanItems().isEmpty()) { - validators.validate(exporter.getFinishedSpanItems(), 6); - } + validators.validate(exporter.getFinishedSpanItems(), 6); } private static void updateTelemetrySetting(Client client, boolean value) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java similarity index 71% rename from plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java rename to plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java index 4d0966e6b5185..98d265e92ba3c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java @@ -6,7 +6,9 @@ * compatible open source license. 
*/ -package org.opensearch.telemetry.tracing; +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.tags.Tags; import java.util.Locale; @@ -16,7 +18,7 @@ /** * Converts {@link org.opensearch.telemetry.tracing.attributes.Attributes} to OTel {@link Attributes} */ -final class OTelAttributesConverter { +public final class OTelAttributesConverter { /** * Constructor. @@ -28,7 +30,7 @@ private OTelAttributesConverter() {} * @param attributes attributes * @return otel attributes. */ - static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) { + public static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) { AttributesBuilder attributesBuilder = Attributes.builder(); if (attributes != null) { attributes.getAttributesMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder)); @@ -49,4 +51,17 @@ private static void addSpanAttribute(String key, Object value, AttributesBuilder throw new IllegalArgumentException(String.format(Locale.ROOT, "Span attribute value %s type not supported", value)); } } + + /** + * Attribute converter. + * @param tags attributes + * @return otel attributes. + */ + public static Attributes convert(Tags tags) { + AttributesBuilder attributesBuilder = Attributes.builder(); + if (tags != null) { + tags.getTagsMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder)); + } + return attributesBuilder.build(); + } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index 1af88196e3727..297ae8873636f 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -8,28 +8,36 @@ package org.opensearch.telemetry; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.TelemetryPlugin; -import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.tracing.OTelResourceProvider; import org.opensearch.telemetry.tracing.OTelTelemetry; -import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import java.util.Arrays; import java.util.List; import java.util.Optional; +import io.opentelemetry.sdk.OpenTelemetrySdk; + /** * Telemetry plugin based on Otel */ public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin { + /** + * Instrumentation scope name. 
+ */ + public static final String INSTRUMENTATION_SCOPE_NAME = "org.opensearch.telemetry"; + static final String OTEL_TRACER_NAME = "otel"; private final Settings settings; + private RefCountedReleasable refCountedOpenTelemetry; + /** * Creates Otel plugin * @param settings cluster settings @@ -44,23 +52,39 @@ public List> getSettings() { OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, - OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING ); } @Override public Optional getTelemetry(TelemetrySettings telemetrySettings) { + initializeOpenTelemetrySdk(telemetrySettings); return Optional.of(telemetry(telemetrySettings)); } + private void initializeOpenTelemetrySdk(TelemetrySettings telemetrySettings) { + if (refCountedOpenTelemetry != null) { + return; + } + OpenTelemetrySdk openTelemetrySdk = OTelResourceProvider.get(telemetrySettings, settings); + refCountedOpenTelemetry = new RefCountedReleasable<>("openTelemetry", openTelemetrySdk, openTelemetrySdk::close); + } + @Override public String getName() { return OTEL_TRACER_NAME; } private Telemetry telemetry(TelemetrySettings telemetrySettings) { - return new OTelTelemetry(new OTelTracingTelemetry(OTelResourceProvider.get(telemetrySettings, settings)), new MetricsTelemetry() { - }); + return new OTelTelemetry(refCountedOpenTelemetry); + } + + @Override + public void close() { + if (refCountedOpenTelemetry != null) { + refCountedOpenTelemetry.close(); + } } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java index 59c87cca22986..8e23f724b4570 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java @@ -11,13 +11,16 @@ import org.opensearch.SpecialPermission; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import io.opentelemetry.exporter.logging.LoggingMetricExporter; import io.opentelemetry.exporter.logging.LoggingSpanExporter; +import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.trace.export.SpanExporter; /** @@ -83,4 +86,28 @@ private OTelTelemetrySettings() {} Setting.Property.NodeScope, Setting.Property.Final ); + + /** + * Metrics Exporter type setting. + */ + @SuppressWarnings("unchecked") + public static final Setting> OTEL_METRICS_EXPORTER_CLASS_SETTING = new Setting<>( + "telemetry.otel.metrics.exporter.class", + LoggingMetricExporter.class.getName(), + className -> { + // Check we ourselves are not being called by unprivileged code. 
+ SpecialPermission.check(); + + try { + return AccessController.doPrivileged((PrivilegedExceptionAction>) () -> { + final ClassLoader loader = OTelMetricsExporterFactory.class.getClassLoader(); + return (Class) loader.loadClass(className); + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException("Unable to load span exporter class:" + className, ex.getCause()); + } + }, + Setting.Property.NodeScope, + Setting.Property.Final + ); } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java new file mode 100644 index 0000000000000..b72f63e027243 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleCounter; + +/** + * OTel Counter + */ +class OTelCounter implements Counter { + + private final DoubleCounter otelDoubleCounter; + + /** + * Constructor + * @param otelDoubleCounter delegate counter. + */ + public OTelCounter(DoubleCounter otelDoubleCounter) { + this.otelDoubleCounter = otelDoubleCounter; + } + + @Override + public void add(double value) { + otelDoubleCounter.add(value); + } + + @Override + public void add(double value, Tags tags) { + otelDoubleCounter.add(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java new file mode 100644 index 0000000000000..6160e5106c041 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelTelemetryPlugin; + +import java.io.Closeable; +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleUpDownCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.sdk.OpenTelemetrySdk; + +/** + * OTel implementation for {@link MetricsTelemetry} + */ +public class OTelMetricsTelemetry implements MetricsTelemetry { + private final RefCountedReleasable refCountedOpenTelemetry; + private final Meter otelMeter; + private final T meterProvider; + + /** + * Creates OTel based {@link MetricsTelemetry}. + * @param openTelemetry open telemetry. 
+ * @param meterProvider {@link MeterProvider} instance + */ + public OTelMetricsTelemetry(RefCountedReleasable openTelemetry, T meterProvider) { + this.refCountedOpenTelemetry = openTelemetry; + this.refCountedOpenTelemetry.incRef(); + this.meterProvider = meterProvider; + this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); + } + + @Override + public Counter createCounter(String name, String description, String unit) { + DoubleCounter doubleCounter = AccessController.doPrivileged( + (PrivilegedAction) () -> otelMeter.counterBuilder(name) + .setUnit(unit) + .setDescription(description) + .ofDoubles() + .build() + ); + return new OTelCounter(doubleCounter); + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + DoubleUpDownCounter doubleUpDownCounter = AccessController.doPrivileged( + (PrivilegedAction) () -> otelMeter.upDownCounterBuilder(name) + .setUnit(unit) + .setDescription(description) + .ofDoubles() + .build() + ); + return new OTelUpDownCounter(doubleUpDownCounter); + } + + @Override + public void close() throws IOException { + meterProvider.close(); + refCountedOpenTelemetry.close(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java new file mode 100644 index 0000000000000..2f40881996f7e --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleUpDownCounter; + +/** + * OTel Counter + */ +public class OTelUpDownCounter implements Counter { + + private final DoubleUpDownCounter doubleUpDownCounter; + + /** + * Constructor + * @param doubleUpDownCounter delegate counter. + */ + public OTelUpDownCounter(DoubleUpDownCounter doubleUpDownCounter) { + this.doubleUpDownCounter = doubleUpDownCounter; + } + + @Override + public void add(double value) { + doubleUpDownCounter.add(value); + } + + @Override + public void add(double value, Tags tags) { + doubleUpDownCounter.add(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java new file mode 100644 index 0000000000000..ef5a31e4003ca --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
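OTelCounter and OTelUpDownCounter above only forward add() calls, converting Tags to OTel Attributes where supplied, so all recording goes through the Counter handles produced by the telemetry layer. A hedged sketch of how a consumer might record values with tags; the component, metric names, and the byte unit are assumptions, while MetricsRegistry, Counter, and Tags come from this change:

import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;

final class IndexingMetricsSketch {
    private final Counter bulkRequests;
    private final Counter inFlightBulkBytes;

    IndexingMetricsSketch(MetricsRegistry registry) {
        // Monotonic counter: only ever increases.
        this.bulkRequests = registry.createCounter("sketch.bulk.requests", "Bulk requests seen", "1");
        // Up-down counter: may be decremented again once a request completes.
        this.inFlightBulkBytes = registry.createUpDownCounter("sketch.bulk.inflight.bytes", "In-flight bulk bytes", "By");
    }

    void onBulkStarted(String index, long bytes) {
        Tags tags = Tags.create().addTag("index", index);
        bulkRequests.add(1.0, tags);
        inFlightBulkBytes.add(bytes, tags);
    }

    void onBulkCompleted(String index, long bytes) {
        inFlightBulkBytes.add(-bytes, Tags.create().addTag("index", index));
    }
}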
+ */ + +package org.opensearch.telemetry.metrics.exporter; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.SpecialPermission; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Method; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +/** + * Factory class to create the {@link MetricExporter} instance. + */ +public class OTelMetricsExporterFactory { + + private static final Logger logger = LogManager.getLogger(OTelMetricsExporterFactory.class); + + /** + * Base constructor. + */ + private OTelMetricsExporterFactory() { + + } + + /** + * Creates the {@link MetricExporter} instances based on the OTEL_METRIC_EXPORTER_CLASS_SETTING value. + * As of now, it expects the MetricExporter implementations to have a create factory method to instantiate the + * MetricExporter. + * @param settings settings. + * @return MetricExporter instance. + */ + public static MetricExporter create(Settings settings) { + Class MetricExporterProviderClass = OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.get(settings); + MetricExporter metricExporter = instantiateExporter(MetricExporterProviderClass); + logger.info("Successfully instantiated the Metrics MetricExporter class {}", MetricExporterProviderClass); + return metricExporter; + } + + private static MetricExporter instantiateExporter(Class exporterProviderClass) { + try { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + return AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + String methodName = "create"; + String getDefaultMethod = "getDefault"; + for (Method m : exporterProviderClass.getMethods()) { + if (m.getName().equals(getDefaultMethod)) { + methodName = getDefaultMethod; + break; + } + } + try { + return (MetricExporter) MethodHandles.publicLookup() + .findStatic(exporterProviderClass, methodName, MethodType.methodType(exporterProviderClass)) + .asType(MethodType.methodType(MetricExporter.class)) + .invokeExact(); + } catch (Throwable e) { + if (e.getCause() instanceof NoSuchMethodException) { + throw new IllegalStateException("No create factory method exist in [" + exporterProviderClass.getName() + "]"); + } else { + throw new IllegalStateException( + "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]", + e.getCause() + ); + } + } + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException( + "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]", + ex.getCause() + ); + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java new file mode 100644 index 0000000000000..b48ec3e2336c4 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
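OTelMetricsExporterFactory above resolves the class named by telemetry.otel.metrics.exporter.class and invokes a static create() factory on it (preferring getDefault() when one exists), and the MethodHandles lookup requires that factory to return the exporter's own type rather than the MetricExporter interface. A minimal sketch of a pluggable exporter that satisfies this contract, with all behaviour stubbed out; this class is illustrative and not part of the change:

import java.util.Collection;

import io.opentelemetry.sdk.common.CompletableResultCode;
import io.opentelemetry.sdk.metrics.InstrumentType;
import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.metrics.export.MetricExporter;

public final class SketchMetricExporter implements MetricExporter {

    // The factory resolves this via MethodHandles.findStatic(...), so the declared return
    // type must be the exporter class itself, not the MetricExporter interface.
    public static SketchMetricExporter create() {
        return new SketchMetricExporter();
    }

    private SketchMetricExporter() {}

    @Override
    public CompletableResultCode export(Collection<MetricData> metrics) {
        return CompletableResultCode.ofSuccess();
    }

    @Override
    public CompletableResultCode flush() {
        return CompletableResultCode.ofSuccess();
    }

    @Override
    public CompletableResultCode shutdown() {
        return CompletableResultCode.ofSuccess();
    }

    @Override
    public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) {
        return AggregationTemporality.CUMULATIVE;
    }
}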
+ */ + +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.metrics.exporter; diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java new file mode 100644 index 0000000000000..803c159eb201a --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.metrics; diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index fe05cc8bb7a41..14a19f122c17b 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -10,6 +10,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; import org.opensearch.telemetry.tracing.sampler.RequestSampler; @@ -18,17 +19,18 @@ import java.security.PrivilegedAction; import java.util.concurrent.TimeUnit; -import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; import io.opentelemetry.context.propagation.ContextPropagators; import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.sdk.trace.samplers.Sampler; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; +import io.opentelemetry.semconv.ResourceAttributes; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; @@ -44,11 +46,11 @@ private OTelResourceProvider() {} * Creates OpenTelemetry instance with default configuration * @param telemetrySettings telemetry settings * @param settings cluster settings - * @return OpenTelemetry instance + * @return OpenTelemetrySdk instance */ - public static OpenTelemetry get(TelemetrySettings telemetrySettings, Settings settings) { + public static OpenTelemetrySdk get(TelemetrySettings telemetrySettings, Settings settings) { return AccessController.doPrivileged( - (PrivilegedAction) () -> get( + (PrivilegedAction) () -> get( settings, OTelSpanExporterFactory.create(settings), ContextPropagators.create(W3CTraceContextPropagator.getInstance()), @@ -63,17 +65,46 @@ public static OpenTelemetry get(TelemetrySettings telemetrySettings, Settings se * @param 
spanExporter span exporter instance * @param contextPropagators context propagator instance * @param sampler sampler instance - * @return Opentelemetry instance + * @return OpenTelemetrySdk instance */ - public static OpenTelemetry get(Settings settings, SpanExporter spanExporter, ContextPropagators contextPropagators, Sampler sampler) { + public static OpenTelemetrySdk get( + Settings settings, + SpanExporter spanExporter, + ContextPropagators contextPropagators, + Sampler sampler + ) { Resource resource = Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "OpenSearch")); - SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + SdkTracerProvider sdkTracerProvider = createSdkTracerProvider(settings, spanExporter, sampler, resource); + SdkMeterProvider sdkMeterProvider = createSdkMetricProvider(settings, resource); + return OpenTelemetrySdk.builder() + .setTracerProvider(sdkTracerProvider) + .setMeterProvider(sdkMeterProvider) + .setPropagators(contextPropagators) + .buildAndRegisterGlobal(); + } + + private static SdkMeterProvider createSdkMetricProvider(Settings settings, Resource resource) { + return SdkMeterProvider.builder() + .setResource(resource) + .registerMetricReader( + PeriodicMetricReader.builder(OTelMetricsExporterFactory.create(settings)) + .setInterval(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS) + .build() + ) + .build(); + } + + private static SdkTracerProvider createSdkTracerProvider( + Settings settings, + SpanExporter spanExporter, + Sampler sampler, + Resource resource + ) { + return SdkTracerProvider.builder() .addSpanProcessor(spanProcessor(settings, spanExporter)) .setResource(resource) .setSampler(sampler) .build(); - - return OpenTelemetrySdk.builder().setTracerProvider(sdkTracerProvider).setPropagators(contextPropagators).buildAndRegisterGlobal(); } private static BatchSpanProcessor spanProcessor(Settings settings, SpanExporter spanExporter) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java index 8ad03d807d9da..fc917968579e1 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java @@ -57,7 +57,9 @@ public void addAttribute(String key, Boolean value) { @Override public void setError(Exception exception) { - delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + if (exception != null) { + delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + } } @Override diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java index 282fabd43346b..0c697d2cc5e8c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java @@ -8,34 +8,39 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; + +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * Otel implementation of Telemetry */ public class OTelTelemetry implements Telemetry { - private 
final TracingTelemetry tracingTelemetry; - private final MetricsTelemetry metricsTelemetry; + private final RefCountedReleasable refCountedOpenTelemetry; /** * Creates Telemetry instance - * @param tracingTelemetry tracing telemetry - * @param metricsTelemetry metrics telemetry + + */ + /** + * Creates Telemetry instance + * @param refCountedOpenTelemetry open telemetry. */ - public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) { - this.tracingTelemetry = tracingTelemetry; - this.metricsTelemetry = metricsTelemetry; + public OTelTelemetry(RefCountedReleasable refCountedOpenTelemetry) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; } @Override public TracingTelemetry getTracingTelemetry() { - return tracingTelemetry; + return new OTelTracingTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkTracerProvider()); } @Override public MetricsTelemetry getMetricsTelemetry() { - return metricsTelemetry; + return new OTelMetricsTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkMeterProvider()); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java index 53066ad4ad444..af39617a8c744 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -8,41 +8,41 @@ package org.opensearch.telemetry.tracing; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; import java.io.IOException; -import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.TracerProvider; import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * OTel based Telemetry provider */ -public class OTelTracingTelemetry implements TracingTelemetry { - - private static final Logger logger = LogManager.getLogger(OTelTracingTelemetry.class); - private final OpenTelemetry openTelemetry; +public class OTelTracingTelemetry implements TracingTelemetry { + private final RefCountedReleasable refCountedOpenTelemetry; + private final T tracerProvider; private final io.opentelemetry.api.trace.Tracer otelTracer; /** - * Creates OTel based Telemetry - * @param openTelemetry OpenTelemetry instance + * Creates OTel based {@link TracingTelemetry} + * @param refCountedOpenTelemetry OpenTelemetry instance + * @param tracerProvider {@link TracerProvider} instance. 
*/ - public OTelTracingTelemetry(OpenTelemetry openTelemetry) { - this.openTelemetry = openTelemetry; - this.otelTracer = openTelemetry.getTracer("os-tracer"); - + public OTelTracingTelemetry(RefCountedReleasable refCountedOpenTelemetry, T tracerProvider) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; + this.refCountedOpenTelemetry.incRef(); + this.tracerProvider = tracerProvider; + this.otelTracer = tracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @Override - public void close() { - try { - ((Closeable) openTelemetry).close(); - } catch (IOException e) { - logger.warn("Error while closing Opentelemetry", e); - } + public void close() throws IOException { + tracerProvider.close(); + refCountedOpenTelemetry.close(); } @Override @@ -52,7 +52,7 @@ public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) @Override public TracingContextPropagator getContextPropagator() { - return new OTelTracingContextPropagator(openTelemetry); + return new OTelTracingContextPropagator(refCountedOpenTelemetry.get()); } private Span createOtelSpan(SpanCreationContext spanCreationContext, Span parentSpan) { diff --git a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy index 726db3d3f4700..9d529ed5a2a56 100644 --- a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy @@ -11,6 +11,7 @@ grant { permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.net.NetPermission "getProxySelector"; permission java.net.SocketPermission "*", "connect,resolve"; + permission java.util.PropertyPermission "*", "read,write"; }; diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java index 8c2b5d14733e2..2fcf89947e537 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java @@ -12,12 +12,15 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -25,6 +28,7 @@ import java.util.Set; import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; @@ -34,41 +38,47 @@ public class OTelTelemetryPluginTests extends OpenSearchTestCase { - private OTelTelemetryPlugin oTelTracerModulePlugin; + private OTelTelemetryPlugin oTelTelemetryPlugin; private Optional telemetry; 
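The tracing and metrics telemetry classes above each take a reference on the shared OpenTelemetrySdk in their constructors and release it on close, with the plugin holding the initial reference and registering sdk::close as the release action. A sketch of that reference-counting flow as it appears in this change; the method and variable names are illustrative:

import org.opensearch.common.concurrent.RefCountedReleasable;

import io.opentelemetry.sdk.OpenTelemetrySdk;

final class SdkLifecycleSketch {
    static void lifecycle(OpenTelemetrySdk sdk) {
        // The plugin owns the initial reference and registers sdk::close as the release action.
        RefCountedReleasable<OpenTelemetrySdk> ref = new RefCountedReleasable<>("openTelemetry", sdk, sdk::close);

        ref.incRef(); // OTelTracingTelemetry takes a reference in its constructor
        ref.incRef(); // OTelMetricsTelemetry does the same

        ref.close();  // tracing telemetry closed: reference released, SDK stays open
        ref.close();  // metrics telemetry closed: reference released, SDK stays open
        ref.close();  // plugin closed: last reference released, sdk.close() runs
    }
}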
private TracingTelemetry tracingTelemetry; + private MetricsTelemetry metricsTelemetry; + @Before public void setup() { // TRACER_EXPORTER_DELAY_SETTING should always be less than 10 seconds because // io.opentelemetry.sdk.OpenTelemetrySdk.close waits only for 10 seconds for shutdown to complete. Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); - oTelTracerModulePlugin = new OTelTelemetryPlugin(settings); - telemetry = oTelTracerModulePlugin.getTelemetry( + oTelTelemetryPlugin = new OTelTelemetryPlugin(settings); + telemetry = oTelTelemetryPlugin.getTelemetry( new TelemetrySettings(Settings.EMPTY, new ClusterSettings(settings, Set.of(TRACER_ENABLED_SETTING, TRACER_SAMPLER_PROBABILITY))) ); tracingTelemetry = telemetry.get().getTracingTelemetry(); + metricsTelemetry = telemetry.get().getMetricsTelemetry(); } public void testGetTelemetry() { Set> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); - assertEquals(OTEL_TRACER_NAME, oTelTracerModulePlugin.getName()); + assertEquals(OTEL_TRACER_NAME, oTelTelemetryPlugin.getName()); assertTrue(tracingTelemetry instanceof OTelTracingTelemetry); + assertTrue(metricsTelemetry instanceof OTelMetricsTelemetry); assertEquals( Arrays.asList( TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, - OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING + OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, + OTEL_METRICS_EXPORTER_CLASS_SETTING ), - oTelTracerModulePlugin.getSettings() + oTelTelemetryPlugin.getSettings() ); } @After - public void cleanup() { + public void cleanup() throws IOException { tracingTelemetry.close(); + metricsTelemetry.close(); } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java new file mode 100644 index 0000000000000..9de575b69774a --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.OTelTelemetryPlugin; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleCounterBuilder; +import io.opentelemetry.api.metrics.DoubleUpDownCounter; +import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder; +import io.opentelemetry.api.metrics.LongCounterBuilder; +import io.opentelemetry.api.metrics.LongUpDownCounterBuilder; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class OTelMetricsTelemetryTests extends OpenSearchTestCase { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCounter() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); + LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); + DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder); + when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter); + + Counter counter = metricsTelemetry.createCounter(counterName, description, unit); + counter.add(1.0); + verify(mockOTelDoubleCounter).add(1.0); + Tags tags = Tags.create().addTag("test", "test"); + counter.add(2.0, tags); + verify(mockOTelDoubleCounter).add(2.0, OTelAttributesConverter.convert(tags)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCounterNegativeValue() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Meter mockMeter = mock(Meter.class); + DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); + LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); + DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); + + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); + 
when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder); + when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter); + + Counter counter = metricsTelemetry.createCounter(counterName, description, unit); + counter.add(-1.0); + verify(mockOTelDoubleCounter).add(-1.0); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testUpDownCounter() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Meter mockMeter = mock(Meter.class); + DoubleUpDownCounter mockOTelUpDownDoubleCounter = mock(DoubleUpDownCounter.class); + LongUpDownCounterBuilder mockOTelLongUpDownCounterBuilder = mock(LongUpDownCounterBuilder.class); + DoubleUpDownCounterBuilder mockOTelDoubleUpDownCounterBuilder = mock(DoubleUpDownCounterBuilder.class); + + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.upDownCounterBuilder(counterName)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.setDescription(description)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleUpDownCounterBuilder); + when(mockOTelDoubleUpDownCounterBuilder.build()).thenReturn(mockOTelUpDownDoubleCounter); + + Counter counter = metricsTelemetry.createUpDownCounter(counterName, description, unit); + counter.add(1.0); + verify(mockOTelUpDownDoubleCounter).add(1.0); + Tags tags = Tags.create().addTag("test", "test"); + counter.add(-2.0, tags); + verify(mockOTelUpDownDoubleCounter).add((-2.0), OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java new file mode 100644 index 0000000000000..65c52911dbef9 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.exporter; + +import java.util.Collection; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +public class DummyMetricExporter implements MetricExporter { + @Override + public CompletableResultCode export(Collection metrics) { + return null; + } + + @Override + public CompletableResultCode flush() { + return null; + } + + @Override + public CompletableResultCode shutdown() { + return null; + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return null; + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java new file mode 100644 index 0000000000000..e68da030bfb52 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.exporter; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.exporter.logging.LoggingMetricExporter; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter; +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +public class OTelMetricsExporterFactoryTests extends OpenSearchTestCase { + + public void testMetricsExporterDefault() { + Settings settings = Settings.builder().build(); + MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings); + assertTrue(metricExporter instanceof LoggingMetricExporter); + } + + public void testMetricsExporterLogging() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "io.opentelemetry.exporter.logging.LoggingMetricExporter" + ) + .build(); + MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings); + assertTrue(metricExporter instanceof LoggingMetricExporter); + } + + public void testMetricExporterInvalid() { + Settings settings = Settings.builder().put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "abc").build(); + assertThrows(IllegalArgumentException.class, () -> OTelMetricsExporterFactory.create(settings)); + } + + public void testMetricExporterNoCreateFactoryMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.exporter.DummyMetricExporter" + ) + .build(); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings)); + assertEquals( + "MetricExporter instantiation failed for class [org.opensearch.telemetry.metrics.exporter.DummyMetricExporter]", + exception.getMessage() + ); + } + + public void testMetricExporterNonMetricExporterClass() { + Settings settings = Settings.builder() + 
.put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "java.lang.String") + .build(); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings)); + assertEquals("MetricExporter instantiation failed for class [java.lang.String]", exception.getMessage()); + assertTrue(exception.getCause() instanceof NoSuchMethodError); + + } + + public void testMetricExporterGetDefaultMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter" + ) + .build(); + + assertTrue(OTelMetricsExporterFactory.create(settings) instanceof OtlpGrpcMetricExporter); + } + +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java index d992daec1b7bb..ee67384d01759 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java @@ -8,6 +8,8 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; @@ -19,13 +21,13 @@ public class OTelAttributesConverterTests extends OpenSearchTestCase { public void testConverterNullAttributes() { - io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(null); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert((Attributes) null); assertEquals(0, otelAttributes.size()); } public void testConverterEmptyAttributes() { Attributes attributes = Attributes.EMPTY; - io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(null); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(attributes); assertEquals(0, otelAttributes.size()); } @@ -47,4 +49,12 @@ public void testConverterMultipleAttributes() { assertEquals(4, otelAttributes.size()); otelAttributes.asMap().forEach((x, y) -> assertEquals(attributeMap.get(x.getKey()), y)); } + + public void testConverterMultipleTags() { + Tags tags = Tags.create().addTag("key1", 1l).addTag("key2", 1.0).addTag("key3", true).addTag("key4", "value4"); + Map tagsMap = tags.getTagsMap(); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(tags); + assertEquals(4, otelAttributes.size()); + otelAttributes.asMap().forEach((x, y) -> assertEquals(tagsMap.get(x.getKey()), y)); + } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java index 505756318ff62..1f0c2f674e655 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java @@ -8,16 +8,16 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelTelemetryPlugin; import 
org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; -import java.util.Collections; -import java.util.Map; - import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.api.trace.SpanBuilder; import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.api.trace.TracerProvider; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -26,29 +26,34 @@ import static org.mockito.Mockito.when; public class OTelTracingTelemetryTests extends OpenSearchTestCase { - + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithoutParent() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); - Map attributeMap = Collections.singletonMap("name", "value"); Attributes attributes = Attributes.create().addAttribute("name", "value"); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null); verify(mockSpanBuilder, never()).setParent(any()); verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes)); assertNull(span.getParentSpan()); } + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithParent() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); @@ -58,7 +63,10 @@ public void testCreateSpanWithParent() { Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create().addAttribute("name", 1l); Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); @@ -69,10 +77,12 @@ public void testCreateSpanWithParent() { assertEquals("parent_span", span.getParentSpan().getSpanName()); } + @SuppressWarnings({ "rawtypes", "unchecked" 
}) public void testCreateSpanWithParentWithMultipleAttributes() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); @@ -82,7 +92,10 @@ public void testCreateSpanWithParentWithMultipleAttributes() { Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create() .addAttribute("key1", 1l) .addAttribute("key2", 2.0) @@ -115,12 +128,17 @@ private io.opentelemetry.api.common.Attributes createAttributeLong(Attributes at return attributesBuilder.build(); } + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testGetContextPropagator() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); } diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git 
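The new testConverterMultipleTags case in OTelAttributesConverterTests above feeds a Tags instance holding long, double, boolean and String values into OTelAttributesConverter.convert and expects matching OpenTelemetry attributes. The converter implementation is not shown in this diff; a minimal sketch of the type dispatch it implies, working directly from the tags map, could look like the following (the class and method names here are illustrative only):

import java.util.Map;

import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.common.AttributesBuilder;

final class TagsToAttributes {
    // Converts a tag map of the shape returned by Tags#getTagsMap into OTel Attributes.
    static Attributes convert(Map<String, Object> tagsMap) {
        AttributesBuilder builder = Attributes.builder();
        tagsMap.forEach((key, value) -> {
            if (value instanceof Long) {
                builder.put(key, (Long) value);
            } else if (value instanceof Double) {
                builder.put(key, (Double) value);
            } else if (value instanceof Boolean) {
                builder.put(key, (Boolean) value);
            } else {
                builder.put(key, String.valueOf(value));
            }
        });
        return builder.build();
    }
}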
a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java index d25ef33c2ce29..5abd6f2710198 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java @@ -257,7 +257,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + *
<p>
          * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java index dfa72d6d59a0d..55920bab4efd3 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java @@ -52,6 +52,7 @@ import org.opensearch.nio.NioSelector; import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpTransport; import org.opensearch.transport.TransportSettings; @@ -84,9 +85,10 @@ protected NioTransport( PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, - NioGroupFactory groupFactory + NioGroupFactory groupFactory, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.groupFactory = groupFactory; } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java index ec266d76eff3d..d4be876867651 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java @@ -91,7 +91,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( NIO_TRANSPORT_NAME, @@ -103,7 +104,8 @@ public Map> getTransports( pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - getNioGroupFactory(settings) + getNioGroupFactory(settings), + tracer ) ); } diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java index 24cc38c17a9d1..f5d1c618f5ace 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.AbstractSimpleTransportTestCase; @@ -81,7 +82,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti 
new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService(), - new NioGroupFactory(settings, logger) + new NioGroupFactory(settings, logger), + NoopTracer.INSTANCE ) { @Override diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index aabc3aee8887f..2675e9b62de35 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -78,7 +78,7 @@ * PercolatorFieldMapper#createQueryBuilderField(...) method). Using the query builders writable contract. This test * does best effort verifying that we don't break bwc for query builders between the first previous major version and * the latest current major release. - * + *
<p>
          * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. */ diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java index c3c332aecfd4c..8ca90791f649e 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java @@ -65,7 +65,7 @@ /** * Create a simple "daemon controller", put it in the right place and check that it runs. - * + *
<p>
          * Extends LuceneTestCase rather than OpenSearchTestCase as OpenSearchTestCase installs a system call filter, and * that prevents the Spawner class from doing its job. Also needs to run in a separate JVM to other * tests that extend OpenSearchTestCase for the same reason. diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 66c6525439dac..082ed5277575a 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -70,6 +70,11 @@ tasks.dependenciesInfo.enabled = false tasks.thirdPartyAudit.ignoreMissingClasses() +tasks.thirdPartyAudit.ignoreViolations( + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' +) + tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java index 02a613be320c2..4bb3877fc04a8 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java @@ -441,7 +441,7 @@ public static Path createTempDir(String prefix) throws IOException { /** * Run the given action with a temporary copy of the config directory. - * + *
<p>
          * Files under the path passed to the action may be modified as necessary for the * test to execute, and running OpenSearch with {@link #startOpenSearch()} will * use the temporary directory. diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java index 7904d1a046916..958de24848178 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java @@ -51,7 +51,7 @@ /** * Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also matches the permission * set is what we expect. - * + *
<p>
          * This class saves information about its failed matches in instance variables and so instances should not be reused */ public class FileMatcher extends TypeSafeMatcher { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java index 25cefa948ff10..26af39d66cad3 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java @@ -137,7 +137,7 @@ public static Installation ofContainer(Shell sh, Distribution distribution) { /** * Returns the user that owns this installation. - * + *
<p>
          * For packages this is root, and for archives it is the user doing the installation. */ public String getOwner() { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index b80ae422bda9a..e9ebf28042b46 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -194,11 +194,11 @@ private static void verifyInstallation(Installation opensearch, Distribution dis // we shell out here because java's posix file permission view doesn't support special modes assertThat(opensearch.config, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660))); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index aef363058b394..f963f8d221bb5 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -98,11 +98,11 @@ private void waitForSearchableDocs(String index, int shardCount, int replicaCoun // Verify segment store assertBusy(() -> { - /** - * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by - * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging - * to primary while remaining *replicaCount* records belongs to replica copies - * */ + /* + Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by + line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging + to primary while remaining *replicaCount* records belongs to replica copies + */ Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica"); segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count"); Response segrepStatsResponse = client().performRequest(segrepStatsRequest); @@ -259,7 +259,8 @@ public void testIndexing() throws Exception { * This test verifies that during rolling upgrades the segment replication does not break when replica shards can * be running on older codec versions. 
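The waitForSearchableDocs hunk earlier in the IndexingIT changes sorts the _cat/segments response by shard, segment and primaryOrReplica precisely so the tabular output can be consumed line by line, with each segment contributing one primary row followed by its replica rows. A minimal sketch of that style of parsing, assuming the column order set by the h parameter (index, shard, primaryOrReplica, segment, docs.count); the helper below is illustrative and not part of the test:

// Illustrative parser for the sorted _cat/segments output described above.
static void assertReplicasMatchPrimary(String catSegmentsBody, int replicaCount) {
    String[] rows = catSegmentsBody.trim().split("\n");
    // Sorting by shard,segment,primaryOrReplica groups each segment as: primary row, then replicas.
    for (int i = 0; i + replicaCount < rows.length; i += replicaCount + 1) {
        String[] primary = rows[i].trim().split("\\s+");
        long primaryDocCount = Long.parseLong(primary[4]);   // docs.count column
        for (int r = 1; r <= replicaCount; r++) {
            String[] replica = rows[i + r].trim().split("\\s+");
            assert primary[3].equals(replica[3]) : "rows are not chunked by segment";
            assert Long.parseLong(replica[4]) == primaryDocCount : "replica doc count differs from primary";
        }
    }
}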
* - * @throws Exception exception + * @throws Exception if index creation fail + * @throws UnsupportedOperationException if cluster type is unknown */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679") public void testIndexingWithSegRep() throws Exception { diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md new file mode 100644 index 0000000000000..040cc053469ed --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -0,0 +1,68 @@ +## 2023-10-12 Version 2.11.0 Release Notes + +## [2.11] + +### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) +- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Add parallel file download support for remote store based replication ([#8596](https://github.com/opensearch-project/OpenSearch/pull/8596)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) +- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) +- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) +- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) + +### Dependencies +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) +- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) +- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) +- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) +- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 
([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) +- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) + +### Changed +- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) +- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) +- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) +- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) +- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) +- Add instrumentation in Inbound Handler. ([#100143](https://github.com/opensearch-project/OpenSearch/pull/10143)) +- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) +- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) +- [Metrics Framework] Add Metrics framework. 
([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) +- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) + +### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) + +### Fixed +- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) +- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) +- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) +- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 3f79227ce64e8..784c7b52b18b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -141,8 +141,8 @@ --- "Metric - indexing doc_status": - skip: - version: " - 2.99.99" - reason: "To be introduced in future release :: TODO: change if/when we backport to 2.x" + version: " - 2.10.99" + reason: "Doc Status Stats were introduced in v2.11.0" - do: nodes.info: {} - set: diff --git a/server/build.gradle b/server/build.gradle index f6db3d53a0dcc..c56f9d5aa288f 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -364,7 +364,9 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'com.google.protobuf.UnsafeUtil$MemoryAccessor' + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' ) } diff --git a/server/licenses/log4j-api-2.20.0.jar.sha1 b/server/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/server/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/server/licenses/log4j-api-2.21.0.jar.sha1 b/server/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/server/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/server/licenses/log4j-core-2.20.0.jar.sha1 b/server/licenses/log4j-core-2.20.0.jar.sha1 deleted file mode 100644 index 49c972626563b..0000000000000 --- 
a/server/licenses/log4j-core-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb2a9a47b1396e00b5eee1264296729a70565cc0 \ No newline at end of file diff --git a/server/licenses/log4j-core-2.21.0.jar.sha1 b/server/licenses/log4j-core-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..c88e6f7a25ca9 --- /dev/null +++ b/server/licenses/log4j-core-2.21.0.jar.sha1 @@ -0,0 +1 @@ +122e1a9e0603cc9eae07b0846a6ff01f2454bc49 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.20.0.jar.sha1 b/server/licenses/log4j-jul-2.20.0.jar.sha1 deleted file mode 100644 index a456651e4569e..0000000000000 --- a/server/licenses/log4j-jul-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8170e6118eac1ab332046c179718a0f107f688e1 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.21.0.jar.sha1 b/server/licenses/log4j-jul-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..480010840abca --- /dev/null +++ b/server/licenses/log4j-jul-2.21.0.jar.sha1 @@ -0,0 +1 @@ +f0da61113f4a47654677e6a98b1e13ca7de2483d \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 4ac89f2e792d7..0000000000000 --- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c82be3d997d781bb72d6d0eadade064dd2cd6db \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..6ad304fa52c12 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 @@ -0,0 +1 @@ +36f0363325ca7bf62c180160d1ed5165c7c37795 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 624b5174a444f..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c261d17c681c0d91171c67e192abfef59adea2e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f104c4207d390 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 @@ -0,0 +1 @@ +e98fb408028f40170e6d87c16422bfdc0bb2e392 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 70baf1270cd5d..0000000000000 --- a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0.jar.sha1 b/server/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/server/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 20ddb9ae3ef27..0000000000000 --- a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d1cf3d6db43fad6630376ba59451f848f4d387c \ No newline at end of file diff 
--git a/server/licenses/lucene-grouping-9.8.0.jar.sha1 b/server/licenses/lucene-grouping-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ab132121b2edc --- /dev/null +++ b/server/licenses/lucene-grouping-9.8.0.jar.sha1 @@ -0,0 +1 @@ +d39184518351178c404ed9669fc6cb6111f2288d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c3ad03ca53b13..0000000000000 --- a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -83ab97638bb5269f950d75bba5675d3cfb63f2fa \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..c7cb678fb7b72 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 @@ -0,0 +1 @@ +1ac38c8278dbd63dfab30744a41dd955a415a31c \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c2a4c5334b314..0000000000000 --- a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97c26362151908dc892263edda3872abbacb71a8 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0.jar.sha1 b/server/licenses/lucene-join-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2b6cb8af4faf6 --- /dev/null +++ b/server/licenses/lucene-join-9.8.0.jar.sha1 @@ -0,0 +1 @@ +3d64fc57bb6e718d906413a9f73c713e6d4d8bb0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 32534d07e47dc..0000000000000 --- a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8337eddc0dddd0d7dd50c5aa0d17e5e31592f9fa \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0.jar.sha1 b/server/licenses/lucene-memory-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..5fdfee401dd0a --- /dev/null +++ b/server/licenses/lucene-memory-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5283ac71d6ccecb5e00c7b52df2faec012f2625a \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 7db245cc521c7..0000000000000 --- a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2e3fae930295f0e2b401effe04eafc25692a414 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0.jar.sha1 b/server/licenses/lucene-misc-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..cf815cba15862 --- /dev/null +++ b/server/licenses/lucene-misc-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9a57b049cf51a5e9c9c1909c420f645f1b6f9a54 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index d01a6d733196e..0000000000000 --- a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e88d8a464e6cfa345b946c9c8822ba7ee2a9159f \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0.jar.sha1 b/server/licenses/lucene-queries-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..09f369ef18e12 --- 
/dev/null +++ b/server/licenses/lucene-queries-9.8.0.jar.sha1 @@ -0,0 +1 @@ +628db4ef46f1c6a05145bdac1d1bc4ace6341b13 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c7b9640bad170..0000000000000 --- a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9905790675c01e8dc24f9a5e6b9b28b879c65a52 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2a42a8956b18b --- /dev/null +++ b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 @@ -0,0 +1 @@ +982faf2bfa55542bf57fbadef54c19ac00f57cae \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c4cd9e47624f8..0000000000000 --- a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6c8be427ec8ffc7e8233ffbf0d190d95a56cf14 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..64a0b07f72d29 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 @@ -0,0 +1 @@ +06493dbd14d02537716822254866a94458f4d842 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index dfee145d3ea26..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11716d61288feaa692593bf699affa8de2b564c4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d1bcb0581435c --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9d9a731822ad6eefa1ba288a0c158d478522f165 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index c7410086ba86c..0000000000000 --- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a888e06c0535403b9e58a8dcddeb5e6513a4930 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d17459cc569a9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 @@ -0,0 +1 @@ +ce752a52b2d4eac90633c7df7982e29504f99e76 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 deleted file mode 100644 index 6d8d4205f4d02..0000000000000 --- a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52dfc8bf135ed29f5baf0a967c1bb63dedb9a069 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0.jar.sha1 b/server/licenses/lucene-suggest-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ff47b87672d2c --- /dev/null +++ b/server/licenses/lucene-suggest-9.8.0.jar.sha1 @@ -0,0 
+1 @@ +f977f96f2093b7fddea6b67caa2e1c5b10edebf6 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java index 6343bd127c458..4c9f49df71257 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java @@ -60,8 +60,8 @@ public class HotThreadsIT extends OpenSearchIntegTestCase { public void testHotThreadsDontFail() throws ExecutionException, InterruptedException { - /** - * This test just checks if nothing crashes or gets stuck etc. + /* + This test just checks if nothing crashes or gets stuck etc. */ createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java index 0197ccf059737..44ba585016d8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java @@ -112,7 +112,7 @@ protected int numberOfEvents(String actionMasks, Function findEvents(String actionMasks, Function, Boolean> criteria) { List events = new ArrayList<>(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java index c733329a1b5f7..e6fd9139d45f2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java @@ -30,7 +30,7 @@ /** * Integration tests for task management API with Concurrent Segment Search - * + *
<p>
          * The way the test framework bootstraps the test cluster makes it difficult to parameterize the feature flag. * Once concurrent search is moved behind a cluster setting we can parameterize these tests behind the setting. */ @@ -72,7 +72,7 @@ protected Settings featureFlagSettings() { /** * Tests the number of threads that worked on a search task. - * + *
<p>
          * Currently, we try to control concurrency by creating an index with 7 segments and rely on * the way concurrent search creates leaf slices from segments. Once more concurrency controls are introduced * we should improve this test to use those methods. diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index aff7c5d9876ac..36fe3748e9d10 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -46,7 +46,7 @@ /** * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. - * + *
<p>
          * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 347011721c728..78fb01b07b6b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -53,7 +53,7 @@ /** * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. - * + *
<p>
          * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 50ff76c6b62f3..82ab5b0118c0e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -37,6 +37,7 @@ public AcknowledgedResponse createDataStream(String name) throws Exception { CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name); AcknowledgedResponse response = client().admin().indices().createDataStream(request).get(); assertThat(response.isAcknowledged(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -67,6 +68,7 @@ public RolloverResponse rolloverDataStream(String name) throws Exception { RolloverResponse response = client().admin().indices().rolloverIndex(request).get(); assertThat(response.isAcknowledged(), is(true)); assertThat(response.isRolledOver(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -109,5 +111,4 @@ public AcknowledgedResponse deleteIndexTemplate(String name) throws Exception { assertThat(response.isAcknowledged(), is(true)); return response; } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java index 737c0acc309fd..cd6cb0ca3b172 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java @@ -194,7 +194,7 @@ private static void indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + *
<p>
          * This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java index c62c61d5919d6..aefabcb9bc14f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java @@ -69,7 +69,7 @@ /** * The purpose of this test is to verify that when a processor executes an operation asynchronously that * the expected result is the same as if the same operation happens synchronously. - * + *
<p>
          * In this test two test processor are defined that basically do the same operation, but a single processor * executes asynchronously. The result of the operation should be the same and also the order in which the * bulk responses are returned should be the same as how the corresponding index requests were defined. diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java index f0a3b5a5901ce..b1934f901ac65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java @@ -109,8 +109,8 @@ public List getAggregations() { @Override public List getFetchSubPhases(FetchPhaseConstructionContext context) { - /** - * Set up a fetch sub phase that throws an exception on indices whose name that start with "boom". + /* + Set up a fetch sub phase that throws an exception on indices whose name that start with "boom". */ return Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index 4c8bf24b1655a..84648eda3d38c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -317,8 +317,8 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { ); Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0)); Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1)); - internalCluster().stopRandomNonClusterManagerNode(); - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); logger.info("--> verify that there is no cluster-manager anymore on remaining node"); // spin here to wait till the state is set diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 38b86d307d197..737b272613a44 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -461,7 +461,7 @@ public boolean validateClusterForming() { /** * Tests that indices are properly deleted even if there is a cluster-manager transition in between. 
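The hunks that follow (ClusterManagerDisruptionIT and RemoteClusterStateServiceIT) assert on success, failure and timing counters exposed by ClusterStateStats and on per-node discovery stats fetched with the DISCOVERY nodes-stats metric. That stats class is not part of this diff; a minimal sketch of a counter holder with the getter names the tests rely on, purely for illustration, might look like this:

import java.util.concurrent.atomic.AtomicLong;

// Illustrative counter holder; the real ClusterStateStats lives in org.opensearch.cluster.service
// and may differ in shape.
final class UpdateCounters {
    private final AtomicLong updateSuccess = new AtomicLong();
    private final AtomicLong updateFailed = new AtomicLong();
    private final AtomicLong updateTotalTimeInMillis = new AtomicLong();

    void onUpdateSuccess(long tookMillis) {
        updateSuccess.incrementAndGet();
        updateTotalTimeInMillis.addAndGet(tookMillis);
    }

    void onUpdateFailure() {
        updateFailed.incrementAndGet();
    }

    long getUpdateSuccess() { return updateSuccess.get(); }
    long getUpdateFailed() { return updateFailed.get(); }
    long getUpdateTotalTimeInMillis() { return updateTotalTimeInMillis.get(); }
}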
- * Test for https://github.com/elastic/elasticsearch/issues/11665 + * Test for Elasticsearch issue #11665 */ public void testIndicesDeleted() throws Exception { final String idxName = "test"; diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 1463c45aa9b2f..79f6ba6dfa642 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -199,6 +200,8 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc } } + ClusterStateStats clusterStateStats = internalCluster().clusterService().getClusterManagerService().getClusterStateStats(); + assertTrue(clusterStateStats.getUpdateFailed() > 0); }); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index a2864b6dfd1da..70124c8c46700 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -136,7 +136,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { // shutting down the nodes, to avoid the leakage check tripping // on the states associated with the commit requests we may have dropped - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); } public void testClusterFormingWithASlowNode() { diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 2bab61f3e1c4c..229cd7bffad2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -53,7 +53,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.ShardPath; @@ -519,7 +519,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .put("number_of_replicas", 1) // disable merges to keep segments the same - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 
6fcc89cfe9e9a..59eef3c06844b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -8,9 +8,12 @@ package org.opensearch.gateway.remote; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.DiscoveryStats; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -19,6 +22,7 @@ import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Map; +import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; @@ -86,14 +90,53 @@ public void testFullClusterRestoreStaleDelete() throws Exception { assertEquals(10, repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); - Map indexMetadataMap = remoteClusterStateService.getLatestIndexMetadata( + Map indexMetadataMap = remoteClusterStateService.getLatestMetadata( cluster().getClusterName(), getClusterState().metadata().clusterUUID() - ); + ).getIndices(); assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); } + public void testRemoteStateStats() { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String clusterManagerNode = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().getDataNodeNames().stream().collect(Collectors.toList()).get(0); + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(clusterManagerNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + + // assert cluster state stats + DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); + + assertNotNull(discoveryStats.getClusterStateStats()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateSuccess() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getUpdateFailed()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() > 0); + // assert remote state stats + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getSuccessCount() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getFailedCount()); + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getTotalTimeInMillis() > 0); + + NodesStatsResponse nodesStatsResponseDataNode = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + // assert cluster state stats for data node + DiscoveryStats dataNodeDiscoveryStats = 
nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); + assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); + assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + } + private void setReplicaCount(int replicaCount) { client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index bb08b19df765b..c394a1f631690 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -650,7 +650,15 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul } } }; - final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener); + NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); + final IndexShard newShard = newIndexShard( + indexService, + shard, + wrapper, + getInstanceFromNode(CircuitBreakerService.class), + env.nodeId(), + listener + ); shardRef.set(newShard); recoverShard(newShard); @@ -674,6 +682,7 @@ public static final IndexShard newIndexShard( final IndexShard shard, CheckedFunction wrapper, final CircuitBreakerService cbs, + final String nodeId, final IndexingOperationListener... listeners ) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); @@ -702,7 +711,9 @@ public static final IndexShard newIndexShard( SegmentReplicationCheckpointPublisher.EMPTY, null, null, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + nodeId, + null ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java index f8c2acbf99f70..b431079476624 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -73,7 +73,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.translog.TestTranslog; @@ -135,7 +135,7 @@ public void testCorruptIndex() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 7e1d0792e3ddb..8291fef5d177b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -72,7 +72,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -167,7 +167,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -286,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files @@ -552,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -624,7 +624,7 @@ public void testReplicaCorruption() throws Exception { prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index bdefd7a5e199a..f485d4e402b41 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -91,7 +91,7 @@ public void testGlobalPrimaryAllocation() throws Exception { /** * This test verifies the happy path where primary shard allocation is balanced when multiple indices are created. - * + *

          * This test in general passes without primary shard balance as well due to nature of allocation algorithm which * assigns all primary shards first followed by replica copies. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 8e68a8bde39d5..1d93eecd6b245 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -197,9 +197,10 @@ protected IndexShard getIndexShard(String node, ShardId shardId, String indexNam protected IndexShard getIndexShard(String node, String indexName) { final Index index = resolveIndex(indexName); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexServiceSafe(index); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); final Optional shardId = indexService.shardIds().stream().findFirst(); - return indexService.getShard(shardId.get()); + return shardId.map(indexService::getShard).orElse(null); } protected boolean segmentReplicationWithRemoteEnabled() { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index a82fd8d845709..186a5ce39f131 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -19,6 +19,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -123,4 +124,30 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } + public void testIndexReplicationTypeWhenRestrictSettingTrue() { + testRestrictIndexReplicationTypeSetting(true, randomFrom(ReplicationType.values())); + } + + public void testIndexReplicationTypeWhenRestrictSettingFalse() { + testRestrictIndexReplicationTypeSetting(false, randomFrom(ReplicationType.values())); + } + + private void testRestrictIndexReplicationTypeSetting(boolean setRestrict, ReplicationType replicationType) { + String expectedExceptionMsg = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true];"; + String clusterManagerName = internalCluster().startNode( + Settings.builder().put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), setRestrict).build() + ); + internalCluster().startDataOnlyNodes(1); + + // Test create index fails + Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, replicationType).build(); + if (setRestrict) { + IllegalArgumentException 
exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(expectedExceptionMsg, exception.getMessage()); + } else { + createIndex(INDEX_NAME, indexSettings); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java new file mode 100644 index 0000000000000..66b26b5d25cfe --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * These tests simulate corruption cases during replication. They are skipped on WindowsFS simulation where file renaming + * can fail with an access denied IOException because deletion is not permitted. 
+ */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class SegmentReplicationDisruptionIT extends SegmentReplicationBaseIT { + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + public void testSendCorruptBytesToReplica() throws Exception { + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean failed = new AtomicBoolean(false); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { + FileChunkRequest req = (FileChunkRequest) request; + TransportRequest corrupt = new FileChunkRequest( + req.recoveryId(), + ((FileChunkRequest) request).requestSeqNo(), + ((FileChunkRequest) request).shardId(), + ((FileChunkRequest) request).metadata(), + ((FileChunkRequest) request).position(), + new BytesArray("test"), + false, + 0, + 0L + ); + connection.sendRequest(requestId, action, corrupt, options); + latch.countDown(); + } else { + connection.sendRequest(requestId, action, request, options); + } + } + ); + for (int i = 0; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + assertNotEquals(originalRecoveryTime, 0); + refresh(INDEX_NAME); + latch.await(); + assertTrue(failed.get()); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + // reset checkIndex to ensure our original shard doesn't throw + resetCheckIndexStatus(); + waitForSearchableDocs(100, primaryNode, replicaNode); + } + + public void testWipeSegmentBetweenSyncs() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); + indexShard.store().directory().deleteFile("_0.si"); + + for (int i = 11; i < 21; i++) { + client().prepareIndex(INDEX_NAME) + 
.setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + resetCheckIndexStatus(); + waitForSearchableDocs(20, primaryNode, replicaNode); + } + + private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { + assertBusy(() -> { + // assert we have a peer recovery after the original + final long time = getRecoveryStopTime(replicaNode); + assertNotEquals(time, 0); + assertNotEquals(originalRecoveryTime, time); + + }, 1, TimeUnit.MINUTES); + } + + private long getRecoveryStopTime(String nodeName) { + final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); + final List recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); + for (RecoveryState recoveryState : recoveryStates) { + if (recoveryState.getTargetNode().getName().equals(nodeName)) { + return recoveryState.getTimer().stopTime(); + } + } + return 0L; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 33bc5a8f3afe6..f48df082a25dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -1776,5 +1776,4 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { assertThat(response.getIndex(), equalTo(INDEX_NAME)); } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index dd832a63d1e66..dbe0b43441f54 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -26,6 +26,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -55,6 +56,7 @@ private void createIndex(int replicaCount) { * This test verifies happy path when primary shard is relocated newly added node (target) in the cluster. 
Before * relocation and after relocation documents are indexed and documents are verified */ + @TestLogging(reason = "Getting trace logs from replication,shard and allocation package", value = "org.opensearch.indices.replication:TRACE, org.opensearch.index.shard:TRACE, org.opensearch.cluster.routing.allocation:TRACE") public void testPrimaryRelocation() throws Exception { final String oldPrimary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index ae88dd76d54e0..547f9e7a8d380 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -509,7 +509,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { } /** - * Test for https://github.com/elastic/elasticsearch/issues/47276 which checks that the persisted metadata on a data node does not + * Test for Elasticsearch issue #47276 which checks that the persisted metadata on a data node does not * become inconsistent when using replicated closed indices. */ public void testRelocatedClosedIndexIssue() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index a0f01acd1f8e9..0967acb37d3e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -66,8 +66,8 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.VersionConflictEngineException; @@ -589,8 +589,8 @@ public void testNonThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000") ) @@ -621,8 +621,8 @@ public void testThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") 
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1") .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name()) diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index f636185fd4649..d28df90216beb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -198,11 +198,11 @@ public void testNoRebalanceOnRollingRestart() throws Exception { // see https://github.com/elastic/elasticsearch/issues/14387 internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(3); - /** - * We start 3 nodes and a dedicated cluster-manager. Restart on of the data-nodes and ensure that we got no relocations. - * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject - * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. - * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. + /* + We start 3 nodes and a dedicated cluster-manager. Restart one of the data-nodes and ensure that we got no relocations. + Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject + to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. + We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. */ prepareCreate("test").setSettings( Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index bc55f6cc2cbcb..8166c0008ed83 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -33,7 +33,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase { @@ -107,14 +106,18 @@ public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailu .build(); } - protected void deleteRepo() { - logger.info("--> Deleting the repository={}", REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - logger.info("--> Deleting the repository={}", TRANSLOG_REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(TRANSLOG_REPOSITORY_NAME)); + protected void cleanupRepo() { + logger.info("--> Cleanup the repository={}", REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).execute().actionGet(); + logger.info("--> Cleanup the repository={}", 
TRANSLOG_REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(TRANSLOG_REPOSITORY_NAME).execute().actionGet(); } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + return setup(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure, 0); + } + + protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure, int replicaCount) { // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store results in /// IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensures that the // repository creation can happen without failure. @@ -125,8 +128,11 @@ protected String setup(Path repoLocation, double ioFailureRate, String skipExcep settings.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); } + disableRepoConsistencyCheck("Remote Store Creates System Repository"); + internalCluster().startClusterManagerOnlyNode(settings.build()); String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); + internalCluster().startDataOnlyNodes(replicaCount, settings.build()); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); @@ -159,7 +165,7 @@ private String getLocalSegmentFilename(String remoteFilename) { return remoteFilename.split(RemoteSegmentStoreDirectory.SEGMENT_NAME_UUID_SEPARATOR)[0]; } - private IndexResponse indexSingleDoc() { + protected IndexResponse indexSingleDoc() { return client().prepareIndex(INDEX_NAME) .setId(UUIDs.randomBase64UUID()) .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5)) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index ad3e99dd274ce..99c5d7fb2bae7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -46,7 +46,10 @@ protected Collection> nodePlugins() { } protected void restore(String... indices) { - boolean restoreAllShards = randomBoolean(); + restore(randomBoolean(), indices); + } + + protected void restore(boolean restoreAllShards, String... indices) { if (restoreAllShards) { assertAcked(client().admin().indices().prepareClose(indices)); } @@ -58,7 +61,7 @@ protected void restore(String... 
indices) { ); } - protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + protected void verifyRestoredData(Map indexStats, String indexName, boolean indexMoreData) throws Exception { ensureYellowAndNoInitializingShards(indexName); ensureGreen(indexName); // This is to ensure that shards that were already assigned will get latest count @@ -68,6 +71,8 @@ protected void verifyRestoredData(Map indexStats, String indexName 30, TimeUnit.SECONDS ); + if (indexMoreData == false) return; + IndexResponse response = indexSingleDoc(indexName); if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); @@ -80,6 +85,10 @@ protected void verifyRestoredData(Map indexStats, String indexName ); } + protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + verifyRestoredData(indexStats, indexName, true); + } + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 4eb1cc7703735..c957f1b338bfe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -23,7 +23,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexRecoveryIT extends IndexRecoveryIT { @@ -57,7 +56,7 @@ public Settings indexSettings() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 4ebccb9b9e551..9e0b2a66467de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -11,6 +11,7 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.delete.DeleteResponse; @@ -20,8 +21,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.Index; import org.opensearch.core.rest.RestStatus; +import 
org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotInfo; @@ -32,11 +38,15 @@ import org.junit.Before; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; @@ -57,7 +67,7 @@ public void setup() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(BASE_REMOTE_REPO)); + clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get(); } @Override @@ -345,6 +355,90 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 4); } + public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + + // Make sure remote translog is empty + String indexUUID = client().admin() + .indices() + .prepareGetSettings(indexName1) + .get() + .getSetting(indexName1, IndexMetadata.SETTING_INDEX_UUID); + + Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); + Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); + + try ( + Stream translogMetadata = Files.list(remoteTranslogMetadataPath); + Stream translogData = 
Files.list(remoteTranslogDataPath) + ) { + assertTrue(translogData.count() > 0); + assertTrue(translogMetadata.count() > 0); + } + + // Clear the local data before stopping the node. This will make sure that remote translog is empty. + IndexShard indexShard = getIndexShard(primaryNodeName(indexName1), indexName1); + try (Stream files = Files.list(indexShard.shardPath().resolveTranslog())) { + IOUtils.deleteFilesIgnoringExceptions(files.collect(Collectors.toList())); + } + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1))); + + ensureRed(indexName1); + + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(false), PlainActionFuture.newFuture()); + + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + + protected IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); @@ -422,7 +516,7 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } - public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionException, InterruptedException { + public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException { String indexName1 = "testindex1"; String snapshotRepoName = "test-restore-snapshot-repo"; String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; @@ -464,22 +558,7 @@ public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionExce assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); - createRepository(BASE_REMOTE_REPO, "fs", absolutePath2); - - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - - assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); - - ensureRed(restoredIndexName1); - - client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java similarity index 57% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java rename to 
server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index d02c5bf54fbed..f19c9db7874db 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -11,13 +11,20 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractAsyncTask; +import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexService; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -33,7 +40,7 @@ import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteStoreBackpressureIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { +public class RemoteStoreBackpressureAndResiliencyIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception { // Here the doc size of the request remains same throughout the test. After initial indexing, all remote store interactions // fail leading to consecutive failure limit getting exceeded and leading to rejections. @@ -49,7 +56,7 @@ public void testWritesRejectedDueToBytesLagBreach() throws Exception { public void testWritesRejectedDueToTimeLagBreach() throws Exception { // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. 
- validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag"); + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag"); } private void validateBackpressure( @@ -112,7 +119,7 @@ private void validateBackpressure( stats = stats(); indexDocAndRefresh(initialSource, initialDocsToIndex); assertEquals(rejectionCount, stats.rejectionCount); - deleteRepo(); + cleanupRepo(); } private RemoteSegmentTransferTracker.Stats stats() { @@ -126,11 +133,13 @@ private RemoteSegmentTransferTracker.Stats stats() { return matches.get(0).getSegmentStats(); } - private void indexDocAndRefresh(BytesReference source, int iterations) { + private void indexDocAndRefresh(BytesReference source, int iterations) throws InterruptedException { for (int i = 0; i < iterations; i++) { client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); refresh(INDEX_NAME); } + Thread.sleep(250); + client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); } /** @@ -156,4 +165,98 @@ private String generateString(int sizeInBytes) { sb.append("}"); return sb.toString(); } + + /** + * Fixes Github#10398 + */ + public void testAsyncTrimTaskSucceeds() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing"); + IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); + ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(TimeValue.timeValueMillis(100)); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(2, 5), true); + logger.info("--> Indexing succeeded"); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + for (int i = 0; i < randomIntBetween(5, 10); i++) { + UncategorizedExecutionException exception = assertThrows(UncategorizedExecutionException.class, this::indexSingleDoc); + assertEquals("Failed execution", exception.getMessage()); + } + + translogRepo.setRandomControlIOExceptionRate(0d); + indexSingleDoc(); + logger.info("Indexed single doc successfully"); + } + + /** + * Fixes Github#10400 + */ + public void testSkipLoadGlobalCheckpointToReplicationTracker() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + + IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); + IndexShard indexShard = indexService.getShard(0); + indexShard.failShard("failing shard", null); + + ensureRed(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + // Cluster stays red still as the remote interactions are still failing + ensureRed(INDEX_NAME); + + logger.info("Retrying to 
allocate failed shards"); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + // Cluster stays red still as the remote interactions are still failing + ensureRed(INDEX_NAME); + + logger.info("Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + ensureGreen(INDEX_NAME); + } + + public void testFlushDuringRemoteUploadFailures() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + ensureGreen(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc()); + assertEquals("Failed execution", ex.getMessage()); + + FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getFailedShards()); + ensureGreen(INDEX_NAME); + + logger.info("--> Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getSuccessfulShards()); + assertEquals(0, flushResponse.getFailedShards()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 157f8e41fee24..bccca283ba772 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -27,6 +27,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -50,7 +51,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; @@ -271,7 +271,6 @@ public static Settings buildRemoteStoreNodeAttributes( if (withRateLimiterAttributes) { settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) - .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "4kb") .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, 
ByteSizeUnit.BYTES); } @@ -314,8 +313,8 @@ public void teardown() { clusterSettingsSuppliedByTest = false; assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); + clusterAdmin().prepareCleanupRepository(REPOSITORY_2_NAME).get(); } public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { @@ -343,11 +342,24 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { .custom(RepositoriesMetadata.TYPE); RepositoryMetadata actualRepository = repositories.repository(repositoryName); + final RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + for (String nodeName : internalCluster().getNodeNames()) { ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); DiscoveryNode node = clusterService.localNode(); RepositoryMetadata expectedRepository = buildRepositoryMetadata(node, repositoryName); - assertTrue(actualRepository.equalsIgnoreGenerations(expectedRepository)); + + // Validate that all the restricted settings are intact on all the nodes. + repository.getRestrictedSystemRepositorySettings() + .stream() + .forEach( + setting -> assertEquals( + String.format(Locale.ROOT, "Restricted Settings mismatch [%s]", setting.getKey()), + setting.get(actualRepository.settings()), + setting.get(expectedRepository.settings()) + ) + ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 5e92bb195680b..e9afd6d36bb87 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -8,23 +8,40 @@ package org.opensearch.remotestore; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.admin.indices.datastream.DataStreamRolloverIT; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.nio.file.Files; -import java.util.Locale; 
+import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutionException; +import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; +import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; -import static org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { @@ -48,47 +65,17 @@ private Map initialTestSetup(int shardCount, int replicaCount, int private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { internalCluster().stopAllNodes(); - addNewNodes(dataNodeCount, clusterManagerNodeCount); + internalCluster().startClusterManagerOnlyNodes(clusterManagerNodeCount); + internalCluster().startDataOnlyNodes(dataNodeCount); } - private void restoreAndValidate(String clusterUUID, Map indexStats) throws Exception { - restoreAndValidate(clusterUUID, indexStats, true); + protected void verifyRedIndicesAndTriggerRestore(Map indexStats, String indexName, boolean indexMoreDocs) + throws Exception { + ensureRed(indexName); + restore(false, indexName); + verifyRestoredData(indexStats, indexName, indexMoreDocs); } - private void restoreAndValidate(String clusterUUID, Map indexStats, boolean validate) throws Exception { - // TODO once auto restore is merged, the remote cluster state will be restored - - if (validate) { - // Step - 4 validation restore is successful. - ensureGreen(INDEX_NAME); - verifyRestoredData(indexStats, INDEX_NAME); - } - } - - private void restoreAndValidateFails( - String clusterUUID, - PlainActionFuture actionListener, - Class clazz, - String errorSubString - ) { - - try { - restoreAndValidate(clusterUUID, null, false); - } catch (Exception e) { - assertTrue( - String.format(Locale.ROOT, "%s %s", clazz, e), - clazz.isAssignableFrom(e.getClass()) - || clazz.isAssignableFrom(e.getCause().getClass()) - || (e.getCause().getCause() != null && clazz.isAssignableFrom(e.getCause().getCause().getClass())) - ); - assertTrue( - String.format(Locale.ROOT, "Error message mismatch. Expected: [%s]. Actual: [%s]", errorSubString, e.getMessage()), - e.getMessage().contains(errorSubString) - ); - } - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") public void testFullClusterRestore() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -106,10 +93,76 @@ public void testFullClusterRestore() throws Exception { assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same"; // Step - 3 Trigger full cluster restore and validate - restoreAndValidate(prevClusterUUID, indexStats); + validateMetadata(List.of(INDEX_NAME)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } + + /** + * This test scenario covers the case where right after remote state restore and persisting it to disk via LucenePersistedState, full cluster restarts. + * This is a special case for remote state as at this point cluster uuid in the restored state is still ClusterState.UNKNOWN_UUID as we persist it to disk. + * After restart the local disk state will be read but should be again overridden with remote state. + * + * 1. Form a cluster and index a few docs + * 2. Replace all nodes to remove all local disk state + * 3. Start cluster manager node without correct seeding to ensure local disk state is written with cluster uuid ClusterState.UNKNOWN_UUID but with remote restored Metadata + * 4. Restart the cluster manager node with correct seeding. + * 5. After restart the cluster manager picks up the local disk state which has the same Metadata as remote but cluster uuid is still ClusterState.UNKNOWN_UUID + * 6. The cluster manager will try to restore from remote again. + * 7. Metadata loaded from local disk state will be overridden with remote Metadata and no conflict should arise. + * 8. Add data nodes to recover index data + * 9. Verify Metadata and index data are restored. + */ + public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // stop all nodes + internalCluster().stopAllNodes(); + + // start a cluster manager node with no cluster manager seeding. + // This should fail with IllegalStateException as cluster manager fails to form without any initial seed + assertThrows( + IllegalStateException.class, + () -> internalCluster().startClusterManagerOnlyNodes( + clusterManagerNodeCount, + Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable seeding during bootstrapping + .build() + ) + ); + + // verify cluster manager not elected + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) + : "Disabling Cluster manager seeding failed. cluster uuid is not unknown"; + + // restart cluster manager with correct seed + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) // Seed with correct Cluster Manager node + .build(); + } + }); + + // validate new cluster state formed + newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. cluster uuid is still unknown"; + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same"; + validateMetadata(List.of(INDEX_NAME)); + + // start data nodes to trigger index data recovery + internalCluster().startDataOnlyNodes(dataNodeCount); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") public void testFullClusterRestoreMultipleIndices() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -124,6 +177,7 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { Map indexStats2 = indexData(1, false, secondIndexName); assertEquals((shardCount + 1) * (replicaCount + 1), getNumShards(secondIndexName).totalNumShards); ensureGreen(secondIndexName); + updateIndexBlock(true, secondIndexName); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); @@ -134,155 +188,234 @@ public void testFullClusterRestoreMultipleIndices() throws Exception { assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; // Step - 3 Trigger full cluster restore - restoreAndValidate(prevClusterUUID, indexStats); - ensureGreen(secondIndexName); - verifyRestoredData(indexStats2, secondIndexName); + validateMetadata(List.of(INDEX_NAME, secondIndexName)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + verifyRedIndicesAndTriggerRestore(indexStats2, secondIndexName, false); + assertTrue(INDEX_READ_ONLY_SETTING.get(clusterService().state().metadata().index(secondIndexName).getSettings())); + assertThrows(ClusterBlockException.class, () -> indexSingleDoc(secondIndexName)); + // Test is complete + + // Remove the block to ensure proper cleanup + updateIndexBlock(false, secondIndexName); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestoreFailureValidationFailures() throws Exception { + public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; - // index some data to generate files in remote directory - Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + // Step - 1 index some data to generate files in remote directory + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - // Start of Test - 1 - // Test - 1 Trigger full cluster restore and validate it fails due to incorrect cluster UUID - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreAndValidateFails("randomUUID", future, IllegalStateException.class, "Remote Cluster State not found - randomUUID"); - // End of Test - 1 - - // Start of Test - 3 - // Test - 2 Trigger full cluster restore and validate it fails due to cluster UUID same as current cluster UUID - future = PlainActionFuture.newFuture(); - restoreAndValidateFails( - clusterService().state().metadata().clusterUUID(), - future, - IllegalArgumentException.class, - "clusterUUID to restore from should be different from current cluster UUID" - ); - // End of Test - 2 + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + String clusterName = clusterService().state().getClusterName().value(); - // Start of Test - 3 // Step - 2 Replace all nodes in the cluster with new 
nodes. This ensures new cluster state doesn't have previous index metadata - // Restarting cluster with just 1 data node helps with applying cluster settings - resetCluster(1, clusterManagerNodeCount); - String newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - - reduceShardLimits(1, 1); - - // Step - 4 Trigger full cluster restore and validate it fails - future = PlainActionFuture.newFuture(); - restoreAndValidateFails( - prevClusterUUID, - future, - IllegalArgumentException.class, - "this action would add [2] total shards, but this cluster currently has [0]/[1] maximum shards open" - ); - resetShardLimits(); - // End of Test - 3 + internalCluster().stopAllNodes(); + // Step - 3 Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertThrows(IllegalStateException.class, () -> addNewNodes(dataNodeCount, clusterManagerNodeCount)); + // Test is complete - // Start of Test - 4 - // Test -4 Reset cluster and trigger full restore with same name index in the cluster - // Test -4 Add required nodes for this test after last reset. - addNewNodes(dataNodeCount - 1, 0); + // Starting a node without remote state to ensure test cleanup + internalCluster().startNode(Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build()); + } - newClusterUUID = clusterService().state().metadata().clusterUUID(); - assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + public void testRemoteStateFullRestart() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; - // Test -4 Step - 2 Create a new index with same name - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); - ensureYellowAndNoInitializingShards(INDEX_NAME); + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + // Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) + + "/cluster-state/" + + prevClusterUUID + + "/manifest" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + internalCluster().fullRestart(); ensureGreen(INDEX_NAME); + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. 
cluster uuid has changed"; + validateCurrentMetadata(); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } - future = PlainActionFuture.newFuture(); + private void validateMetadata(List indexNames) { + assertEquals(clusterService().state().metadata().indices().size(), indexNames.size()); + for (String indexName : indexNames) { + assertTrue(clusterService().state().metadata().hasIndex(indexName)); + } + } - // Test -4 Step - 3 Trigger full cluster restore and validate fails - restoreAndValidateFails( - prevClusterUUID, - future, - IllegalStateException.class, - "cannot restore index [remote-store-test-idx-1] because an open index with same name/uuid already exists in the cluster" + private void validateCurrentMetadata() throws Exception { + RemoteClusterStateService remoteClusterStateService = internalCluster().getInstance( + RemoteClusterStateService.class, + internalCluster().getClusterManagerName() ); + assertBusy(() -> { + ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().metadata().clusterUUID() + ).get(); + ClusterState clusterState = getClusterState(); + Metadata currentMetadata = clusterState.metadata(); + assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); + assertEquals(currentMetadata.coordinationMetadata().term(), manifest.getClusterTerm()); + assertEquals(clusterState.version(), manifest.getStateVersion()); + assertEquals(clusterState.stateUUID(), manifest.getStateUUID()); + assertEquals(currentMetadata.clusterUUIDCommitted(), manifest.isClusterUUIDCommitted()); + for (UploadedIndexMetadata uploadedIndexMetadata : manifest.getIndices()) { + IndexMetadata currentIndexMetadata = currentMetadata.index(uploadedIndexMetadata.getIndexName()); + assertEquals(currentIndexMetadata.getIndex().getUUID(), uploadedIndexMetadata.getIndexUUID()); + } + }); + } - // Test -4 Step - 4 validation restore is successful. - ensureGreen(INDEX_NAME); - // End of Test - 4 + public void testDataStreamPostRemoteStateRestore() throws Exception { + new DataStreamRolloverIT() { + protected boolean triggerRemoteStateRestore() { + return true; + } + }.testDataStreamRollover(); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") - public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { + public void testFullClusterRestoreGlobalMetadata() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; // Step - 1 index some data to generate files in remote directory - initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + // Create global metadata - register a custom repo + Path repoPath = registerCustomRepository(); + + // Create global metadata - persistent settings + updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build()); + + // Create global metadata - index template + putIndexTemplate(); + + // Create global metadata - Put cluster block + addClusterLevelReadOnlyBlock(); + // Step - 2 Replace all nodes in the cluster with new nodes. 
This ensures new cluster state doesn't have previous index metadata resetCluster(dataNodeCount, clusterManagerNodeCount); String newClusterUUID = clusterService().state().metadata().clusterUUID(); assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; - // Step - 4 Delete index metadata file in remote - try { - Files.move( - segmentRepoPath.resolve( - RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) - + "/cluster-state/" - + prevClusterUUID - + "/index" - ), - segmentRepoPath.resolve("cluster-state/") - ); - } catch (IOException e) { - throw new RuntimeException(e); - } - - // Step - 5 Trigger full cluster restore and validate fails - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreAndValidateFails(prevClusterUUID, future, IllegalStateException.class, "asdsa"); + // Step - 3 Trigger full cluster restore and validate + // validateCurrentMetadata(); + assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings())); + assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings())); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + // Remove the cluster read only block to ensure proper cleanup + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertFalse(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + + // validate global metadata restored + verifyRestoredRepositories(repoPath); + verifyRestoredIndexTemplate(); } - private void reduceShardLimits(int maxShardsPerNode, int maxShardsPerCluster) { - // Step 3 - Reduce shard limits to hit shard limit with less no of shards - try { + private Path registerCustomRepository() { + Path path = randomRepoPath(); + assertAcked( client().admin() .cluster() - .updateSettings( - new ClusterUpdateSettingsRequest().transientSettings( - Settings.builder() - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) - .put(SETTING_MAX_SHARDS_PER_CLUSTER_KEY, maxShardsPerCluster) - ) - ) - .get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } + .preparePutRepository("custom-repo") + .setType("fs") + .setSettings(Settings.builder().put("location", path).put("compress", false)) + .get() + ); + return path; + } + + private void verifyRestoredRepositories(Path repoPath) { + RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE); + assertEquals(3, repositoriesMetadata.repositories().size()); // includes remote store repo as well + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); + assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + assertEquals( + Settings.builder().put("location", repoPath).put("compress", false).build(), + repositoriesMetadata.repository("custom-repo").settings() + ); + + // repo cleanup post verification + clusterAdmin().prepareDeleteRepository("custom-repo").get(); + } + + private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException { + 
updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), true).build()); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); } - private void resetShardLimits() { - // Step - 5 Reset the cluster settings + private void updatePersistentSettings(Settings settings) throws ExecutionException, InterruptedException { ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); - resetRequest.transientSettings( - Settings.builder().putNull(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()).putNull(SETTING_MAX_SHARDS_PER_CLUSTER_KEY) + resetRequest.persistentSettings(settings); + assertAcked(client().admin().cluster().updateSettings(resetRequest).get()); + } + + private void verifyRestoredIndexTemplate() { + Map indexTemplateMetadataMap = clusterService().state().metadata().templates(); + assertEquals(1, indexTemplateMetadataMap.size()); + assertEquals(Arrays.asList("pattern-1", "log-*"), indexTemplateMetadataMap.get("my-template").patterns()); + assertEquals( + Settings.builder() // <1> + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + .build(), + indexTemplateMetadataMap.get("my-template").settings() ); + } - try { - client().admin().cluster().updateSettings(resetRequest).get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } + private static void putIndexTemplate() { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); // <1> + request.patterns(Arrays.asList("pattern-1", "log-*")); // <2> + + request.settings( + Settings.builder() // <1> + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + ); + assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged()); } + private static void updateIndexBlock(boolean value, String secondIndexName) throws InterruptedException, ExecutionException { + assertAcked( + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(Settings.builder().put(INDEX_READ_ONLY_SETTING.getKey(), value).build(), secondIndexName) + ) + .get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 1fb5c2052aded..b3b4f8e10fd31 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -509,4 +509,27 @@ public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Excepti assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); }); } + + public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + IndexShard indexShard = getIndexShard(primaryShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + + String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen(INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + indexShard = getIndexShard(replicaShardNode); + 
assertFalse(indexShard.isSearchIdleSupported()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index b97e93f323fb2..acdb21d072320 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -28,9 +28,13 @@ public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { public void testRemoteRefreshRetryOnFailure() throws Exception { - Path location = randomRepoPath().toAbsolutePath(); setup(location, randomDoubleBetween(0.1, 0.15, true), "metadata", 10L); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), false)) + .get(); // Here we are having flush/refresh after each iteration of indexing. However, the refresh will not always succeed // due to IOExceptions that are thrown while doing uploadBlobs. @@ -56,7 +60,7 @@ public void testRemoteRefreshRetryOnFailure() throws Exception { logger.info("Local files = {}, Repo files = {}", sortedFilesInLocal, sortedFilesInRepo); assertTrue(filesInRepo.containsAll(filesInLocal)); }, 90, TimeUnit.SECONDS); - deleteRepo(); + cleanupRepo(); } public void testRemoteRefreshSegmentPressureSettingChanged() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index 4d56a1e94e3fc..ef2dcf3217df6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -8,17 +8,31 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase { @@ -94,4 +108,77 @@ public void testMultiNodeClusterRandomNodeRecoverNetworkIsolation() { internalCluster().clearDisruptionScheme(); } + + public void 
testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrictedSettingsUpdate() { + Set nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + Set nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + ensureStableCluster(6); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + networkDisruption.startDisrupting(); + + final Client client = client(nodesInOneSide.iterator().next()); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); + networkDisruption.stopDisrupting(); + + ensureStableCluster(6); + + internalCluster().clearDisruptionScheme(); + } + + public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startNodes(3); + + final Client client = client(); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + internalCluster().restartRandomDataNode(); + + ensureStableCluster(4); + } + + public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException { + GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON)); + XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder)); + assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings())); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 65335f444a2df..7626e3dba6424 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -12,17 +12,26 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.health.ClusterHealthStatus; +import 
org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.nio.file.Path; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThan; @@ -388,14 +397,41 @@ public void testRTSRestoreDataOnlyInTranslog() throws Exception { public void testRateLimitedRemoteDownloads() throws Exception { clusterSettingsSuppliedByTest = true; int shardCount = randomIntBetween(1, 3); + Path segmentRepoPath = randomRepoPath(); + Path tlogRepoPath = randomRepoPath(); prepareCluster( 1, 3, INDEX_NAME, 0, shardCount, - buildRemoteStoreNodeAttributes(REPOSITORY_NAME, randomRepoPath(), REPOSITORY_2_NAME, randomRepoPath(), true) + buildRemoteStoreNodeAttributes(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, tlogRepoPath, true) ); + + // validate inplace repository metadata update + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + DiscoveryNode node = clusterService.localNode(); + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB); + + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get()); + + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); + assertEquals("4096b", segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); + } + Map indexStats = indexData(5, false, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); @@ -414,6 +450,15 @@ public void testRateLimitedRemoteDownloads() throws Exception { assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); assertEquals(0, getNumShards(INDEX_NAME).numReplicas); verifyRestoredData(indexStats, INDEX_NAME); + + // revert repo metadata to pass asserts on repo metadata vs. 
node attrs during teardown + // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700 + settings.remove("max_remote_download_bytes_per_sec"); + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get()); + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); + assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); + } } // TODO: Restore flow - index aliases diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 8ae25c6758195..2d3ab135d0377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -15,6 +15,8 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -23,15 +25,20 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Before; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -44,12 +51,17 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; - @Before + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + public void setup() { internalCluster().startNodes(3); } public void testStatsResponseFromAllNodes() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -118,6 +130,7 @@ public void testStatsResponseFromAllNodes() { } public void testStatsResponseAllShards() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -175,6 +188,7 @@ public void testStatsResponseAllShards() { } public void testStatsResponseFromLocalNode() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. 
@@ -236,6 +250,7 @@ public void testStatsResponseFromLocalNode() { } public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Disable Refresh Interval for the index @@ -325,6 +340,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { + setup(); // Scenario: // - Create index with single primary and N-1 replica shards (N = no of data nodes) // - Disable Refresh Interval for the index @@ -416,6 +432,7 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr } public void testStatsOnShardRelocation() { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Index documents @@ -471,6 +488,7 @@ public void testStatsOnShardRelocation() { } public void testStatsOnShardUnassigned() throws IOException { + setup(); // Scenario: // - Create index with single primary and two replica shard // - Index documents @@ -497,6 +515,7 @@ public void testStatsOnShardUnassigned() throws IOException { } public void testStatsOnRemoteStoreRestore() throws IOException { + setup(); // Creating an index with primary shard count == total nodes in cluster and 0 replicas int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount)); @@ -544,6 +563,7 @@ public void testStatsOnRemoteStoreRestore() throws IOException { } public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exception { + setup(); // Create an index with one primary and one replica shard createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); ensureGreen(INDEX_NAME); @@ -561,26 +581,103 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce .getRemoteStoreStats(); Arrays.stream(remoteStoreStats).forEach(statObject -> { RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); if (statObject.getShardRouting().primary()) { assertTrue( segmentStats.totalUploadsSucceeded == 1 && segmentStats.totalUploadsStarted == segmentStats.totalUploadsSucceeded && segmentStats.totalUploadsFailed == 0 ); + // On primary shard creation, we upload to remote translog post primary mode activation. + // This changes upload stats to non-zero for primary shard. 
+ assertNonZeroTranslogUploadStatsNoFailures(translogStats); } else { assertTrue( segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 ); + assertZeroTranslogUploadStats(translogStats); } - - RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); - assertZeroTranslogUploadStats(translogStats); assertZeroTranslogDownloadStats(translogStats); }); }, 5, TimeUnit.SECONDS); } + public void testStatsCorrectnessOnFailover() { + Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(nodeSettings(0)) + .build(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(clusterSettings); + internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create an index with one primary and one replica shard + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + + // Index some docs and refresh + indexDocs(); + refresh(INDEX_NAME); + + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + + // Start network disruption - primary node will be isolated + Set nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + ensureStableCluster(2, clusterManagerNode); + + RemoteStoreStatsResponse response = client(clusterManagerNode).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, "0"); + List matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(1, matches.size()); + RemoteSegmentTransferTracker.Stats segmentStats = matches.get(0).getSegmentStats(); + assertEquals(0, segmentStats.refreshTimeLagMs); + + networkDisruption.stopDisrupting(); + internalCluster().clearDisruptionScheme(); + ensureStableCluster(3, clusterManagerNode); + ensureGreen(INDEX_NAME); + logger.info("Test completed"); + } + + public void testZeroLagOnCreateIndex() throws InterruptedException { + setup(); + String clusterManagerNode = internalCluster().getClusterManagerName(); + + int numOfShards = randomIntBetween(1, 3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, numOfShards)); + ensureGreen(INDEX_NAME); + long currentTimeNs = System.nanoTime(); + while (currentTimeNs == System.nanoTime()) { + Thread.sleep(10); + } + + for (int i = 0; i < numOfShards; i++) { + RemoteStoreStatsResponse response = client(clusterManagerNode).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, String.valueOf(i)) 
+ .get(); + for (RemoteStoreStats remoteStoreStats : response.getRemoteStoreStats()) { + assertEquals(0, remoteStoreStats.getSegmentStats().refreshTimeLagMs); + } + } + } + private void indexDocs() { for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java new file mode 100644 index 0000000000000..b7b3f1d14f422 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Optional; +import java.util.Set; + +/** + * This class runs tests with remote store + segRep while blocking file downloads + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationUsingRemoteStoreDisruptionIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(1); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCancelReplicationWhileSyncingSegments() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnySegmentFile(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_FILES, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + 
assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + public void testCancelReplicationWhileFetchingMetadata() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnyFiles(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + private String getNode(Set dataNodeNames, boolean primary) { + assertEquals(2, dataNodeNames.size()); + for (String name : dataNodeNames) { + final IndexShard indexShard = getIndexShard(name, INDEX_NAME); + if (indexShard.routingEntry().primary() == primary) { + return name; + } + } + return null; + } + + private IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 45c3ef7f5bae5..23864c35ad154 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -17,7 +17,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** * This class runs Segment Replication Integ test suite with remote store enabled. @@ -50,6 +49,6 @@ public void setup() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index 0da4d81a8871e..6cfc76b7e3223 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -17,7 +17,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** * This class executes the SegmentReplicationPressureIT suite with remote store integration enabled. @@ -49,6 +48,6 @@ public void setup() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index 98fab139f4902..3dfde6f472525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -108,8 +108,15 @@ public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String nam } public void testRateLimitedRemoteUploads() throws Exception { + clusterSettingsSuppliedByTest = true; overrideBuildRepositoryMetadata = true; - internalCluster().startNode(); + Settings.Builder clusterSettings = Settings.builder() + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryLocation, REPOSITORY_2_NAME, repositoryLocation)); + clusterSettings.put( + String.format(Locale.getDefault(), "node.attr." 
+ REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, REPOSITORY_NAME), + MockFsRepositoryPlugin.TYPE + ); + internalCluster().startNode(clusterSettings.build()); Client client = client(); logger.info("--> updating repository"); assertAcked( @@ -119,7 +126,6 @@ public void testRateLimitedRemoteUploads() throws Exception { .setType(MockFsRepositoryPlugin.TYPE) .setSettings( Settings.builder() - .put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true) .put("location", repositoryLocation) .put("compress", compress) .put("max_remote_upload_bytes_per_sec", "1kb") diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java index 079753de95680..36987ac2d4991 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java @@ -27,6 +27,7 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -124,11 +125,11 @@ public void readBlobAsync(String blobName, ActionListener listener) long contentLength = listBlobs().get(blobName).length(); long partSize = contentLength / 10; int numberOfParts = (int) ((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1); - List blobPartStreams = new ArrayList<>(); + List blobPartStreams = new ArrayList<>(); for (int partNumber = 0; partNumber < numberOfParts; partNumber++) { long offset = partNumber * partSize; InputStreamContainer blobPartStream = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset); - blobPartStreams.add(blobPartStream); + blobPartStreams.add(() -> CompletableFuture.completedFuture(blobPartStream)); } ReadContext blobReadContext = new ReadContext(contentLength, blobPartStreams, null); listener.onResponse(blobReadContext); diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index f149d538cc47a..b8415f4b41815 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -108,4 +108,16 @@ public void testUpdateRepository() { final Repository updatedRepository = repositoriesService.repository(repositoryName); assertThat(updatedRepository, updated ? 
not(sameInstance(originalRepository)) : sameInstance(originalRepository)); } + + public void testSystemRepositoryCantBeCreated() { + internalCluster(); + final String repositoryName = "test-repo"; + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath()); + + assertThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java index 4ce8af3e0f081..ee94e574228df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java @@ -124,7 +124,7 @@ protected Settings featureFlagSettings() { } private ZonedDateTime date(String date) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)); } private static String format(ZonedDateTime date, String pattern) { @@ -1481,7 +1481,7 @@ public void testExceptionOnNegativeInterval() { /** * https://github.com/elastic/elasticsearch/issues/31760 shows an edge case where an unmapped "date" field in two indices * that are queried simultaneously can lead to the "format" parameter in the aggregation not being preserved correctly. - * + *
* <p>
          * The error happens when the bucket from the "unmapped" index is received first in the reduce phase, however the case can * be recreated when aggregating about a single index with an unmapped date field and also getting "empty" buckets. */ @@ -1624,8 +1624,8 @@ public void testScriptCaching() throws Exception { .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get() ); - String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); - String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); + String date = DateFieldMapper.getDefaultDateTimeFormatter().format(date(1, 1)); + String date2 = DateFieldMapper.getDefaultDateTimeFormatter().format(date(2, 1)); indexRandom( true, client().prepareIndex("cache_test_idx").setId("1").setSource("d", date), diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java index 04115f69172da..d44071e1ef9c5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -92,7 +92,7 @@ protected Settings featureFlagSettings() { } private ZonedDateTime date(String date) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)); } @Before diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 5e95073209c71..865dd670fbf68 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -221,6 +221,10 @@ public void testNestedDiversity() throws Exception { } public void testNestedSamples() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); // Test samples nested under samples int MAX_DOCS_PER_AUTHOR = 1; int MAX_DOCS_PER_GENRE = 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index bb90c1294ecb8..dc3b690c7f78f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -544,13 +544,13 @@ public void testNested() throws Exception { /** * https://github.com/elastic/elasticsearch/issues/33514 - * + *

          * This bug manifests as the max_bucket agg ("peak") being added to the response twice, because * the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps. * The bug was caused by an UnmappedTerms being the chosen as the first reduction target. UnmappedTerms * delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then * execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values. - * + *

          * Applies to any pipeline agg, not just max. */ public void testFieldIsntWrittenOutTwice() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 0cf89778c6e99..2aad0d2d38901 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -1168,7 +1168,7 @@ public void testHoltWintersMinimization() { * the default settings. Which means our mock histo will match the generated result (which it won't * if the minimizer is actually working, since the coefficients will be different and thus generate different * data) - * + *

          * We can simulate this by setting the window size == size of histo */ public void testMinimizeNotEnoughData() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 4cdf5ae8e674f..42d91ac945662 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -3343,6 +3343,10 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { } public void testHighlightQueryRewriteDatesWithNow() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10434", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index f5d1b8234558e..6b95405b3ebd4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -244,6 +244,10 @@ public void testWithIndexAlias() { } public void testWithIndexFilter() throws InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10433", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword")); assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 43b7179a335f8..4a178e7066846 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -93,8 +93,8 @@ public void testDeletePit() throws Exception { assertTrue(deletePitInfo.isSuccessful()); } validatePitStats("index", 0, 10); - /** - * Checking deleting the same PIT id again results in succeeded + /* + Checking deleting the same PIT id again results in succeeded */ deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); @@ -113,8 +113,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 0); - /** - * Delete Pit #1 + /* + Delete Pit #1 */ DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -128,8 +128,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitResponse = execute.get(); pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 5); - /** - * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) + /* + Delete PIT with both Ids #1 
(which is deleted) and #2 (which is present) */ deletePITRequest = new DeletePitRequest(pitIds); deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -165,9 +165,9 @@ public void testDeleteAllPits() throws Exception { validatePitStats("index1", 5, 0); DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures (as deletion in one node is successful) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -207,9 +207,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { }); ensureGreen(); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures (as deletion in one node is successful) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -242,9 +242,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); ensureGreen(); - /** - * When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and - * once the node restarts, all active contexts are cleared in the node ) + /* + When we invoke delete again, returns success as all readers are cleared. (Delete all on the node which is up and + once the node restarts, all active contexts are cleared in the node) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -278,8 +278,8 @@ public void testDeleteWhileSearch() throws Exception { } } } catch (Exception e) { - /** - * assert for exception once delete pit goes through. throw error in case of any exeption before that. + /* + assert for exception once delete pit goes through. throw error in case of any exception before that. 
*/ if (deleted.get() == true) { Throwable t = ExceptionsHelper.unwrapCause(e.getCause()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 5f794d2abf878..ef73438114079 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.profile.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -40,20 +42,23 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -61,8 +66,32 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { + private final boolean concurrentSearchEnabled; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + + public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { + super(settings); + this.concurrentSearchEnabled = concurrentSearchEnabled; + } -public class QueryProfilerIT extends OpenSearchIntegTestCase { + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, @@ -229,6 +258,7 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -271,6 +301,7 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); + assertQueryProfileResult(result); // Check the children List children = result.getProfiledChildren(); @@ -282,12 +313,14 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); + assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); + assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -330,6 +363,7 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -375,6 +409,7 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -415,6 +450,90 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testSearchLeafForItsLeavesAndRewriteQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = 122; + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + } + + List terms = Arrays.asList("zero", "zero", "one"); + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery( + QueryBuilders.idsQuery().addIds(String.valueOf(randomInt()), String.valueOf(randomInt())), + QueryBuilders.termsQuery("field1", terms) + ).boost(randomFloat()).negativeBoost(randomFloat()); + logger.info("Query: {}", q); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + 
assertThat(shardResult.getValue().getNetworkTime().getInboundNetworkTime(), greaterThanOrEqualTo(0L)); + assertThat(shardResult.getValue().getNetworkTime().getOutboundNetworkTime(), greaterThanOrEqualTo(0L)); + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + List results = searchProfiles.getQueryResults(); + for (ProfileResult result : results) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + Map breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled && results.get(0).equals(result)) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else if (concurrentSearchEnabled) { + assertThat(maxSliceTime, equalTo(0L)); + assertThat(minSliceTime, equalTo(0L)); + assertThat(avgSliceTime, equalTo(0L)); + assertThat(breakdown.size(), equalTo(27)); + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } } CollectorResult result = searchProfiles.getCollectorResult(); @@ -455,6 +574,7 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -494,6 +614,7 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -547,6 +668,7 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -579,4 +701,35 @@ public void testNoProfile() throws Exception { assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } + private void assertQueryProfileResult(ProfileResult result) { + Map breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), 
equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index f16b9a4d67b49..27002b844da1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -119,7 +119,7 @@ public void testScanScrollWithShardExceptions() throws Exception { assertThat(numHits, equalTo(100L)); clearScroll("_all"); - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomDataNode(); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).setScroll(TimeValue.timeValueMinutes(1)).get(); assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards())); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 00ac574b8bd72..b99f66850e9e3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -220,8 +220,8 @@ public void testPitWithSearchAfter() throws Exception { .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) .get(); assertEquals(3, sr.getHits().getHits().length); - /** - * Add new data and assert PIT results remain the same and normal search results gets refreshed + /* + Add new data and assert PIT results remain the same and normal search results gets refreshed */ indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); sr = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java deleted file mode 100644 index 719b75079da92..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java +++ /dev/null @@ -1,608 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.search.simple; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.action.search.SearchRequestBuilder; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.WriteRequest.RefreshPolicy; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.rescore.QueryRescorerBuilder; -import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.index.query.QueryBuilders.boolQuery; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.index.query.QueryBuilders.queryStringQuery; -import static org.opensearch.index.query.QueryBuilders.rangeQuery; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class ParameterizedSimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { - - public ParameterizedSimpleSearchIT(Settings settings) { - super(settings); - } - - @ParametersFactory - public static Collection parameters() { - return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), 
false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } - ); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - - public void testSearchNullIndex() { - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - } - - public void testSearchRandomPreference() throws InterruptedException, ExecutionException { - createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - client().prepareIndex("test").setId("6").setSource("field", "value") - ); - - int iters = scaledRandomIntBetween(10, 20); - for (int i = 0; i < iters; i++) { - String randomPreference = randomUnicodeOfLengthBetween(0, 4); - // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) - while (randomPreference.startsWith("_")) { - randomPreference = randomUnicodeOfLengthBetween(0, 4); - } - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .setPreference(randomPreference) - .get(); - assertHitCount(searchResponse, 6L); - - } - } - - public void testSimpleIp() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("from") - .field("type", "ip") - .endObject() - .startObject("to") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); - - SearchResponse search = client().prepareSearch() - .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) - .get(); - - assertHitCount(search, 1L); - } - - public void testIpCidr() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("ip") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - ensureGreen(); - - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); - refresh(); - - SearchResponse search = 
client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); - assertHitCount(search, 3L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); - assertHitCount(search, 5L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); - assertHitCount(search, 0L); - - assertFailures( - client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), - RestStatus.BAD_REQUEST, - containsString("Expected [ip/prefix] but was [0/0/0/0/0]") - ); - } - - public void testSimpleId() { - createIndex("test"); - - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); - assertHitCount(searchResponse, 1L); - } - - public void testSimpleDateRange() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - ensureGreen(); - refresh(); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 
2010-01-04||+2d/d]")) - .get(); - assertHitCount(searchResponse, 2L); - } - - // TODO: combine this test with SimpleSearchIT.testSimpleTerminateAfterCount after - // https://github.com/opensearch-project/OpenSearch/issues/8371 - public void testSimpleTerminateAfterCountWithSizeAndTrackHits() throws Exception { - prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); - ensureGreen(); - int numDocs = randomIntBetween(15, 29); - List docbuilders = new ArrayList<>(numDocs); - - for (int i = 1; i <= numDocs; i++) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setTerminateAfter(numDocs) - .setSize(0) - .setTrackTotalHits(true) - .get(); - assertEquals(0, searchResponse.getFailedShards()); - } - - public void testSimpleIndexSortEarlyTerminate() throws Exception { - prepareCreate("test").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") - ).setMapping("rank", "type=integer").get(); - ensureGreen(); - int max = randomIntBetween(3, 29); - List docbuilders = new ArrayList<>(max); - - for (int i = max - 1; i >= 0; i--) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - for (int i = 1; i < max; i++) { - searchResponse = client().prepareSearch("test") - .addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - .get(); - assertNull(searchResponse.getHits().getTotalHits()); - for (int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } - } - } - - public void testInsaneFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); - assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); - } - - public void testTooLargeFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); - assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); - assertWindowFails( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - ); - } - - public void testLargeFromAndSizeSucceeds() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount( - client().prepareSearch("idx") - 
.setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkBySetting() throws Exception { - prepareCreate("idx").setSettings( - Settings.builder() - .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), - IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 - ) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .get(), - 1 - ); - } - - public void testTooLargeRescoreWindow() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertRescoreWindowFails(Integer.MAX_VALUE); - assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); - } - - public void testTooLargeRescoreOkBySetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new 
QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. - defaultMaxWindow * 2 - ) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - // Note that this is the RESULT window - Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testQueryNumericFieldWithRegex() throws Exception { - assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); - ensureGreen("idx"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); - } - } - - public void testTermQueryBigInt() throws Exception { - prepareCreate("idx").setMapping("field", "type=keyword").get(); - ensureGreen("idx"); - - client().prepareIndex("idx") - .setId("1") - .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); - assertEquals(1, searchResponse.getHits().getTotalHits().value); - } - - public void testTooLongRegexInRegexpQuery() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); - StringBuilder 
regexp = new StringBuilder(defaultMaxRegexLength); - while (regexp.length() <= defaultMaxRegexLength) { - regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); - } - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() - ); - assertThat( - e.getRootCause().getMessage(), - containsString( - "The length of regex [" - + regexp.length() - + "] used in the Regexp Query request has exceeded " - + "the allowed maximum of [" - + defaultMaxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ) - ); - } - - private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Result window is too large, from + size must be less than or equal to: [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); - } - - private void assertRescoreWindowFails(int windowSize) { - SearchRequestBuilder search = client().prepareSearch("idx") - .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Rescore window [" - + windowSize - + "] is too large. It must " - + "be less than [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat( - e.toString(), - containsString( - "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." - ) - ); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 67e460653245e..7aae41d939cac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -6,28 +6,286 @@ * compatible open source license. */ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + package org.opensearch.search.simple; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.rescore.QueryRescorerBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.query.QueryBuilders.boolQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + +public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public SimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testSearchNullIndex() { + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() + ); + + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", 
"XXX1")).get() + ); + + } + + public void testSearchRandomPreference() throws InterruptedException, ExecutionException { + createIndex("test"); + indexRandom( + true, + client().prepareIndex("test").setId("1").setSource("field", "value"), + client().prepareIndex("test").setId("2").setSource("field", "value"), + client().prepareIndex("test").setId("3").setSource("field", "value"), + client().prepareIndex("test").setId("4").setSource("field", "value"), + client().prepareIndex("test").setId("5").setSource("field", "value"), + client().prepareIndex("test").setId("6").setSource("field", "value") + ); + + int iters = scaledRandomIntBetween(10, 20); + for (int i = 0; i < iters; i++) { + String randomPreference = randomUnicodeOfLengthBetween(0, 4); + // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) + while (randomPreference.startsWith("_")) { + randomPreference = randomUnicodeOfLengthBetween(0, 4); + } + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .setPreference(randomPreference) + .get(); + assertHitCount(searchResponse, 6L); + + } + } + + public void testSimpleIp() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("from") + .field("type", "ip") + .endObject() + .startObject("to") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + + client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + + SearchResponse search = client().prepareSearch() + .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) + .get(); + + assertHitCount(search, 1L); + } + + public void testIpCidr() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("ip") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + refresh(); + + SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); + assertHitCount(search, 3L); + + search = 
client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); + assertHitCount(search, 4L); -public class SimpleSearchIT extends OpenSearchIntegTestCase { + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); + assertHitCount(search, 1L); - // TODO: Move this test to ParameterizedSimpleSearchIT after https://github.com/opensearch-project/OpenSearch/issues/8371 - public void testSimpleTerminateAfterCount() throws Exception { + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); + assertHitCount(search, 5L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); + assertHitCount(search, 0L); + + assertFailures( + client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), + RestStatus.BAD_REQUEST, + containsString("Expected [ip/prefix] but was [0/0/0/0/0]") + ); + } + + public void testSimpleId() { + createIndex("test"); + + client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); + assertHitCount(searchResponse, 1L); + } + + public void testSimpleDateRange() throws Exception { + createIndex("test"); + client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); + client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); + ensureGreen(); + refresh(); + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) + .get(); + assertHitCount(searchResponse, 2L); + } + + public void dotestSimpleTerminateAfterCountWithSize(int size, int max) throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); - int max = 
randomIntBetween(3, 29); List docbuilders = new ArrayList<>(max); for (int i = 1; i <= max; i++) { @@ -44,9 +302,12 @@ public void testSimpleTerminateAfterCount() throws Exception { searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i) + .setSize(size) + .setTrackTotalHits(true) .get(); assertHitCount(searchResponse, i); assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } searchResponse = client().prepareSearch("test") @@ -57,4 +318,397 @@ public void testSimpleTerminateAfterCount() throws Exception { assertHitCount(searchResponse, max); assertFalse(searchResponse.isTerminatedEarly()); } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") + public void testSimpleTerminateAfterCountSize0() throws Exception { + int max = randomIntBetween(3, 29); + dotestSimpleTerminateAfterCountWithSize(0, max); + } + + public void testSimpleTerminateAfterCountRandomSize() throws Exception { + int max = randomIntBetween(3, 29); + dotestSimpleTerminateAfterCountWithSize(randomIntBetween(1, max), max); + } + + public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Exception { + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); + ensureGreen(); + int numDocs = 29; + List docbuilders = new ArrayList<>(numDocs); + + for (int i = 1; i <= numDocs; i++) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse; + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(10) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(numDocs * 2) + .setSize(size) + .setTrackTotalHits(true) + .get(); + 
assertFalse(searchResponse.isTerminatedEarly()); + assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") + public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize() throws Exception { + doTestSimpleTerminateAfterTrackTotalHitsUpTo(0); + } + + public void testSimpleTerminateAfterTrackTotalHitsUpToSize0() throws Exception { + doTestSimpleTerminateAfterTrackTotalHitsUpTo(randomIntBetween(1, 29)); + } + + public void testSimpleIndexSortEarlyTerminate() throws Exception { + prepareCreate("test").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") + ).setMapping("rank", "type=integer").get(); + ensureGreen(); + int max = randomIntBetween(3, 29); + List docbuilders = new ArrayList<>(max); + + for (int i = max - 1; i >= 0; i--) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse; + for (int i = 1; i < max; i++) { + searchResponse = client().prepareSearch("test") + .addDocValueField("rank") + .setTrackTotalHits(false) + .addSort("rank", SortOrder.ASC) + .setSize(i) + .get(); + assertNull(searchResponse.getHits().getTotalHits()); + for (int j = 0; j < i; j++) { + assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); + } + } + } + + public void testInsaneFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); + assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); + } + + public void testTooLargeFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); + assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); + assertWindowFails( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + ); + } + + public void testLargeFromAndSizeSucceeds() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 
- 1) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeOkBySetting() throws Exception { + prepareCreate("idx").setSettings( + Settings.builder() + .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 + ) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .get(), + 1 + ); + } + + public void testTooLargeRescoreWindow() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertRescoreWindowFails(Integer.MAX_VALUE); + assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); + } + + public void testTooLargeRescoreOkBySetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByResultWindowSetting() throws 
Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. + defaultMaxWindow * 2 + ) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + // Note that this is the RESULT window + Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testQueryNumericFieldWithRegex() throws Exception { + assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); + ensureGreen("idx"); + + try { + client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); + fail("SearchPhaseExecutionException should have been thrown"); + } catch (SearchPhaseExecutionException ex) { + assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); + } + } + + public void testTermQueryBigInt() throws Exception { + prepareCreate("idx").setMapping("field", "type=keyword").get(); + ensureGreen("idx"); + + client().prepareIndex("idx") + .setId("1") + .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); + + String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; + XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); + parser.nextToken(); + TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); + assertEquals(1, searchResponse.getHits().getTotalHits().value); + } + + public void testTooLongRegexInRegexpQuery() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); + StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); + while (regexp.length() <= defaultMaxRegexLength) { + 
regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); + } + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() + ); + assertThat( + e.getRootCause().getMessage(), + containsString( + "The length of regex [" + + regexp.length() + + "] used in the Regexp Query request has exceeded " + + "the allowed maximum of [" + + defaultMaxRegexLength + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ) + ); + } + + private void assertWindowFails(SearchRequestBuilder search) { + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Result window is too large, from + size must be less than or equal to: [" + + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); + } + + private void assertRescoreWindowFails(int windowSize) { + SearchRequestBuilder search = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Rescore window [" + + windowSize + + "] is too large. It must " + + "be less than [" + + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat( + e.toString(), + containsString( + "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." + ) + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 7880fc24fd846..1b8bd9694483d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -91,16 +91,16 @@ protected boolean forbidPrivateIndexSettings() { } public void testManyToManyGeoPoints() throws ExecutionException, InterruptedException, IOException { - /** - * | q | d1 | d2 - * | | | - * | | | - * | | | - * |2 o| x | x - * | | | - * |1 o| x | x - * |___________________________ - * 1 2 3 4 5 6 7 + /* + | q | d1 | d2 + | | | + | | | + | | | + |2 o| x | x + | | | + |1 o| x | x + |___________________________ + 1 2 3 4 5 6 7 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -187,11 +187,10 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce } public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedException, IOException { - /** - * q = (0, 0) - * - * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 - * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 + /* + q = (0, 0) + d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 + d2 = (0, 1), (0, 5), (0, 6); so avg. 
distance is 4, median distance is 5 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -256,16 +255,17 @@ protected void createShuffeldJSONArray(XContentBuilder builder, GeoPoint[] point } public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException { - /** q d1 d2 - * |4 o| x | x - * | | | - * |3 o| x | x - * | | | - * |2 o| x | x - * | | | - * |1 o|x |x - * |______________________ - * 1 2 3 4 5 6 + /* + q d1 d2 + |4 o| x | x + | | | + |3 o| x | x + | | | + |2 o| x | x + | | | + |1 o|x |x + |______________________ + 1 2 3 4 5 6 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 066d82483ae91..83f93ab9ff6b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -215,9 +215,6 @@ public void testShallowCloneNameAvailability() throws Exception { final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 5268d5ddf35af..91b0aa6438753 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1884,7 +1884,7 @@ public void testCannotCreateSnapshotsWithSameName() throws Exception { * This test ensures that when a shard is removed from a node (perhaps due to the node * leaving the cluster, then returning), all snapshotting of that shard is aborted, so * all Store references held onto by the snapshot are released. - * + *
<p>
          * See https://github.com/elastic/elasticsearch/issues/20876 */ public void testSnapshotCanceledOnRemovedShard() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java new file mode 100644 index 0000000000000..f50fc691fb232 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.snapshots; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SystemRepositoryIT extends AbstractSnapshotIntegTestCase { + protected Path absolutePath; + final String systemRepoName = "system-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(systemRepoName, absolutePath)) + .build(); + } + + public void testRestrictedSettingsCantBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); + + RepositoryException e = expectThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(systemRepoName).setType("mock").setSettings(repoSettings).get() + ); + assertEquals( + e.getMessage(), + "[system-repo-name] trying to modify an unmodifiable attribute type of system " + + "repository from current value [fs] to new value [mock]" + ); + } + + public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); + + assertAcked( + client.admin().cluster().preparePutRepository(systemRepoName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 442268d513fc3..b46d27bafb2a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -50,7 +50,7 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; import 
org.opensearch.core.rest.RestStatus; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.plugins.Plugin; @@ -669,7 +669,7 @@ public void run() { public void testStressUpdateDeleteConcurrency() throws Exception { // We create an index with merging disabled so that deletes don't get merged away - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false))); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false))); ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java index c651689e21d3d..7f016caf22149 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -93,7 +93,7 @@ * provided the primaryTerm and seqNo still matches. The reason we cannot assume it will not take place after receiving the failure * is that a request can fork into two because of retries on disconnect, and now race against itself. The retry might complete (and do a * dirty or stale read) before the forked off request gets to execute, and that one might still subsequently succeed. - * + *
<p>
          * Such writes are not necessarily fully replicated and can be lost. There is no * guarantee that the previous value did not have the specified primaryTerm and seqNo *

        • CAS writes with other exceptions might or might not have taken place. If they have taken place, then after invocation but not diff --git a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java index 8f67bb87b5c42..0ada02a09d157 100644 --- a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java +++ b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java @@ -59,8 +59,8 @@ public static double getMbPerSec(Thread thread, MergePolicy.OneMerge merge) { * Returns total bytes written by this merge. **/ public static long getTotalBytesWritten(Thread thread, MergePolicy.OneMerge merge) throws IOException { - /** - * TODO: The number of bytes written during the merge should be accessible in OneMerge. + /* + TODO: The number of bytes written during the merge should be accessible in OneMerge. */ if (thread instanceof ConcurrentMergeScheduler.MergeThread) { return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter.getTotalBytesWritten(); diff --git a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java index 9a8c295d60ec7..2f00ea69207a7 100644 --- a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java +++ b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java @@ -38,7 +38,7 @@ /** * This class is just a workaround to make {@link QueryParser#handleBareFuzzy(String, Token, String)} accessible by sub-classes. * It is needed for {@link QueryParser}s that need to override the parsing of the slop in a fuzzy query (e.g. word~2, word~). - * + *
<p>
          * TODO: We should maybe rewrite this with the flexible query parser which matches the same syntax with more freedom. */ public class XQueryParser extends QueryParser { diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index e4c299ba572b1..9ca0491bc29f5 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -50,7 +50,7 @@ * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. - * + *
<p>
          * TODO: If the sort is based on score we should propagate the mininum competitive score when orderedGroups is full. * This is safe for collapsing since the group sort is the same as the query sort. */ diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java index 6fde39b16a59a..4edcdea42b53b 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java @@ -43,13 +43,13 @@ * If the {@link BreakIterator} cannot find a passage smaller than the maximum length, * a secondary break iterator is used to re-split the passage at the first boundary after * maximum length. - * + *
<p>
          * This is useful to split passages created by {@link BreakIterator}s like `sentence` that * can create big outliers on semi-structured text. - * + *
<p>
          * * WARNING: This break iterator is designed to work with the {@link UnifiedHighlighter}. - * + *
<p>
          * TODO: We should be able to create passages incrementally, starting from the offset of the first match and expanding or not * depending on the offsets of subsequent matches. This is currently impossible because {@link FieldHighlighter} uses * only the first matching offset to derive the start and end of each passage. diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java index 4777b77cfbfed..9e9f6d1fd817d 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java @@ -38,8 +38,8 @@ /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + *
<p>
          * Space optimized random access capable array of values with a fixed number of bits/value. Values * are packed contiguously. * diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java index 0324522e9a68d..53cf4ed8e2273 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java @@ -25,8 +25,8 @@ /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + *
<p>
          * This class is similar to {@link Packed64} except that it trades space for speed by ensuring that * a single block needs to be read/written in order to read/write a value. */ diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java index f94a4531a7db9..4260d34ead7c9 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java @@ -35,9 +35,9 @@ /** * Forked from Lucene 8.x; removed in Lucene 8.9 - * + *
<p>
          * Todo: further investigate a better alternative - * + *
<p>
          * Simplistic compression for array of unsigned long values. Each value is {@code >= 0} and {@code * <=} a specified maximum value. The values are stored as packed ints, with each value consuming a * fixed number of bits. diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 4c5d5628b1aac..3632ba2d7304f 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -54,7 +54,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable { /** * Replaces current aliases with the provided aliases. - * + *
<p>
          * Sometimes aliases expressions need to be resolved to concrete aliases prior to executing the transport action. */ void replaceAliases(String... aliases); diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index df6414470ab6b..31f61e76c74ff 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -149,7 +149,7 @@ public interface DocWriteRequest extends IndicesRequest, Accountable { /** * If set, only perform this request if the document was last modification was assigned this primary term. - * + *
<p>
          * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index afdb1d3a0bdd9..e3052b3b80035 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -341,7 +341,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t /** * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method. - * + *
<p>
          * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. diff --git a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java index 0930bd2741810..5948dd3e2b7cb 100644 --- a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java +++ b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java @@ -50,7 +50,7 @@ /** * Information about task operation failures - * + *
<p>
          * The class is final due to serialization limitations * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 625aa91e6ea7f..3dec781f0acf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -95,7 +95,7 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException { * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica * will be picked for explanation. If no replicas are unassigned, the first assigned replica will * be explained. - * + *
<p>
          * Package private for testing. */ ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index dd36b3b8db3ab..874713b51d627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -46,6 +46,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.http.HttpStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -56,6 +57,8 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -127,6 +130,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchBackpressureStats searchBackpressureStats; + @Nullable + private SegmentReplicationRejectionStats segmentReplicationRejectionStats; + @Nullable private ClusterManagerThrottlingStats clusterManagerThrottlingStats; @@ -142,6 +148,12 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchPipelineStats searchPipelineStats; + @Nullable + private NodesResourceUsageStats resourceUsageStats; + + @Nullable + private RepositoriesStats repositoriesStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -198,6 +210,21 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); + } else { + resourceUsageStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + segmentReplicationRejectionStats = in.readOptionalWriteable(SegmentReplicationRejectionStats::new); + } else { + segmentReplicationRejectionStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); + } else { + repositoriesStats = null; + } } public NodeStats( @@ -216,6 +243,7 @@ public NodeStats( @Nullable DiscoveryStats discoveryStats, @Nullable IngestStats ingestStats, @Nullable AdaptiveSelectionStats adaptiveSelectionStats, + @Nullable NodesResourceUsageStats resourceUsageStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, @Nullable ShardIndexingPressureStats shardIndexingPressureStats, @@ -224,7 +252,9 @@ public NodeStats( @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, - @Nullable SearchPipelineStats searchPipelineStats + @Nullable SearchPipelineStats searchPipelineStats, + @Nullable SegmentReplicationRejectionStats 
segmentReplicationRejectionStats, + @Nullable RepositoriesStats repositoriesStats ) { super(node); this.timestamp = timestamp; @@ -241,6 +271,7 @@ public NodeStats( this.discoveryStats = discoveryStats; this.ingestStats = ingestStats; this.adaptiveSelectionStats = adaptiveSelectionStats; + this.resourceUsageStats = resourceUsageStats; this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.shardIndexingPressureStats = shardIndexingPressureStats; @@ -250,6 +281,8 @@ public NodeStats( this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; this.searchPipelineStats = searchPipelineStats; + this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; + this.repositoriesStats = repositoriesStats; } public long getTimestamp() { @@ -344,6 +377,11 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } + @Nullable + public NodesResourceUsageStats getResourceUsageStats() { + return resourceUsageStats; + } + @Nullable public ScriptCacheStats getScriptCacheStats() { return scriptCacheStats; @@ -387,6 +425,16 @@ public SearchPipelineStats getSearchPipelineStats() { return searchPipelineStats; } + @Nullable + public SegmentReplicationRejectionStats getSegmentReplicationRejectionStats() { + return segmentReplicationRejectionStats; + } + + @Nullable + public RepositoriesStats getRepositoriesStats() { + return repositoriesStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -430,6 +478,15 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(resourceUsageStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(segmentReplicationRejectionStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(repositoriesStats); + } } @Override @@ -520,7 +577,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getSearchPipelineStats() != null) { getSearchPipelineStats().toXContent(builder, params); } + if (getResourceUsageStats() != null) { + getResourceUsageStats().toXContent(builder, params); + } + if (getSegmentReplicationRejectionStats() != null) { + getSegmentReplicationRejectionStats().toXContent(builder, params); + } + if (getRepositoriesStats() != null) { + getRepositoriesStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index b0caa469033eb..fc72668d36413 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -213,7 +213,10 @@ public enum Metric { WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), TASK_CANCELLATION("task_cancellation"), - SEARCH_PIPELINE("search_pipeline"); + SEARCH_PIPELINE("search_pipeline"), + RESOURCE_USAGE_STATS("resource_usage_stats"), + SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), + REPOSITORIES("repositories"); private String metricName; diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 615abbaef845d..99cf42cfdc4d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -124,7 +124,10 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), - NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) + NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), + NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 8d82827f4ee50..e62c83490d810 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -67,7 +67,7 @@ /** * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. - * + *
<p>
          * The general flow is: *
<ul>
            *
          • If this isn't being executed on the node to which the requested TaskId belongs then move to that node. diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index f26ccbc569cf0..774bffa10da4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -67,7 +67,7 @@ /** * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}. - * + *
<p>
            * The steps taken by the repository cleanup operation are as follows: *
<ol>
              *
            1. Check that there are no running repository cleanup, snapshot create, or snapshot delete actions diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index ac975e917e056..6fb086f65497e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -88,7 +88,7 @@ public ActionRequestValidationException validate() { /** * The names of the repositories. * - * @return list of repositories + * @return array of repository names */ public String[] repositories() { return this.repositories; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 098a0e60142e7..f8c8df25be532 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -42,8 +42,8 @@ import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; @@ -83,7 +83,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); repositories.toXContent( builder, - new DelegatingMapParams(Collections.singletonMap(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params) + new DelegatingMapParams( + Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true", RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"), + params + ) ); builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 644f23d2bafe6..1eadab6b1352e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -100,7 +100,7 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - repositoriesService.registerRepository( + repositoriesService.registerOrUpdateRepository( request, ActionListener.delegateFailure( listener, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 9f8fc6067f405..0cfc24994768a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -261,7 +261,7 @@ public CreateSnapshotRequest indices(List indices) { /** * Returns a list of indices that should be included into the snapshot * - * @return list of indices + * @return array of index names */ @Override public String[] indices() { @@ -316,7 +316,7 @@ public 
CreateSnapshotRequest partial(boolean partial) { /** * If set to true the operation should wait for the snapshot completion before returning. - * + *
<p>
              * By default, the operation will return as soon as snapshot is initialized. It can be changed by setting this * flag to true. * diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 256850da0d503..c688b13ed9fdd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -497,7 +497,7 @@ public Settings indexSettings() { * this is the snapshot that this request restores. If the client can only identify a snapshot by its name then there is a risk that the * desired snapshot may be deleted and replaced by a new snapshot with the same name which is inconsistent with the original one. This * method lets us fail the restore if the precise snapshot we want is not available. - * + *
<p>
              * This is for internal use only and is not exposed in the REST layer. */ public RestoreSnapshotRequest snapshotUuid(String snapshotUuid) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 18098bc31432f..5efec8b876435 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -168,6 +168,9 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, + false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 7cda041b7d9cf..814a65e2a5bf0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -81,7 +81,7 @@ public Builder addIndices(String... indices) { /** * Specifies what type of requested indices to ignore and wildcard indices expressions. - * + *
<p>
              * For example indices that don't exist. */ @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 7cda0c1948d24..d048a3008f588 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -243,7 +243,7 @@ public CreateIndexRequest settings(Map source) { /** * Set the mapping for this index - * + *
<p>
              * The mapping should be in the form of a JSON string, with an outer _doc key *
<pre>
                    *     .mapping("{\"_doc\":{\"properties\": ... }}")
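As background for the `_doc`-wrapped mapping string shown in the javadoc example above, a minimal sketch of how a caller might build such a request could look as follows; the helper class, index name, and field definition are invented for illustration and are not part of this change.

```java
import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public final class CreateIndexMappingSketch {

    // Mirrors the javadoc example above: the mapping is a JSON string whose
    // outer key is "_doc". The index name and field definition are placeholders.
    static CreateIndexRequest buildRequest() {
        return new CreateIndexRequest("my-index")
            .settings(Settings.builder().put("index.number_of_shards", 1))
            .mapping("{\"_doc\":{\"properties\":{\"title\":{\"type\":\"text\"}}}}");
    }

    static void createIndex(Client client) {
        // Synchronous call for brevity; production callers would pass an ActionListener.
        client.admin().indices().create(buildRequest()).actionGet();
    }
}
```

The overloads touched in the next hunks take the mapping without this wrapper; their javadoc below notes that the definition should not be nested under a type name.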
              @@ -269,7 +269,7 @@ public CreateIndexRequest mapping(String source, XContentType xContentType) {
               
                   /**
                    * Adds mapping that will be added when the index gets created.
              -     *
+     * <p>
              * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -296,7 +296,7 @@ private CreateIndexRequest mapping(BytesReference source, XContentType xContentT /** * Adds mapping that will be added when the index gets created. - * + *
<p>
              * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -432,7 +432,7 @@ public CreateIndexRequest source(String source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
              * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -458,7 +458,7 @@ public CreateIndexRequest source(byte[] source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
              * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(byte[] source, MediaType mediaType) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index 0cff01af536dc..384ae2e028bba 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -130,7 +130,7 @@ protected void writeNodesTo(StreamOutput out, List * NOTE: visible for testing * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index e3c812aedcfe7..6a1d04efa9714 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -46,7 +46,7 @@ /** * Request the mappings of specific fields - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d874b5bb6b1ac..94c88e30295a8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -61,7 +61,7 @@ /** * Response object for {@link GetFieldMappingsRequest} API - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 4a2d2fbe8e950..32f751ceb1c5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -210,7 +210,7 @@ public String source() { /** * A specialized simplified mapping source method, takes the form of simple properties definition: * ("field1", "type=string,store=true"). - * + *
<p>
              * Also supports metadata mapping fields such as `_all` and `_parent` as property definition, these metadata * mapping fields will automatically be put on the top level mapping object. */ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java index c46940fbfecf9..b82b68f6f9489 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java @@ -89,7 +89,7 @@ public void activeOnly(boolean activeOnly) { /** * Contains list of shard id's if shards are passed, empty otherwise. Array is empty by default. * - * @return list of shard id's if shards are passed, empty otherwise + * @return array of shard id's if shards are passed, empty otherwise */ public String[] shards() { return shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index b25bc94a5c8e2..353cdbbbc840c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -54,7 +54,7 @@ /** * Request class to swap index under an alias or increment data stream generation upon satisfying conditions - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index 55ee65d0a4973..a66fcc9e9bcf2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -51,7 +51,7 @@ /** * Response object for {@link RolloverRequest} API - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * diff --git a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java index 0d6d122e31261..25a2c081f8441 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java +++ b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java @@ -40,9 +40,9 @@ /** * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. - * + *
<p>
              * Notes for implementing custom subclasses: - * + *
<p>
              * The underlying mathematical principle of BackoffPolicy are progressions which can be either finite or infinite although * the latter should not be used for retrying. A progression can be mapped to a java.util.Iterator with the following * semantics: @@ -241,7 +241,7 @@ private static class ExponentialEqualJitterBackoffIterator implements Iterator * NOTE: If the value is greater than 30, there can be integer overflow * issues during delay calculation. **/ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 138f21f1f80e5..20dea872e0ad2 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -209,7 +209,7 @@ public static class Failure implements Writeable, ToXContentFragment { /** * For write failures before operation was assigned a sequence number. - * + *
<p>
              * use @{link {@link #Failure(String, String, Exception, long, long)}} * to record operation sequence no with failure */ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java index baf64b3e80af6..141ec24fc390f 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java @@ -185,7 +185,7 @@ public Builder setGlobalPipeline(String globalPipeline) { /** * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). - * + *
<p>
              * The default is to back off exponentially. * * @see org.opensearch.action.bulk.BackoffPolicy#exponentialBackoff() diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index 65043da6c2684..f2f3077001a13 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -67,7 +67,7 @@ /** * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s * and allows to executes it in a single batch. - * + *
<p>
              * Note that we only support refresh on the bulk request not per item. * @see org.opensearch.client.Client#bulk(BulkRequest) * @@ -123,7 +123,7 @@ public BulkRequest add(DocWriteRequest... requests) { /** * Add a request to the current BulkRequest. - * + *
<p>
              * Note for internal callers: This method does not respect all global parameters. * Only the global index is applied to the request objects. * Global parameters would be respected if the request was serialized for a REST call as it is @@ -347,7 +347,7 @@ public final BulkRequest timeout(TimeValue timeout) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
              * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} @@ -364,7 +364,7 @@ public final BulkRequest pipeline(String globalPipeline) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
              - {@link BulkRequest#add(IndexRequest)} - {@link BulkRequest#add(UpdateRequest)} - {@link BulkRequest#add(DocWriteRequest)} @@ -404,7 +404,7 @@ public Boolean requireAlias() { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
              * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 726ba7ba119af..4a9b07c12821d 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -85,6 +85,11 @@ import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -133,6 +138,7 @@ public class TransportBulkAction extends HandledTransportAction() { - @Override - public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { - bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); - } - docStatusStats.inc(bulkItemResponse.status()); - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); - } + final Span span = tracer.startSpan(SpanBuilder.from("bulkShardAction", nodeId, bulkShardRequest)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + shardBulkAction.execute( + bulkShardRequest, + TraceableActionListener.create(ActionListener.runBefore(new ActionListener() { + @Override + public void onResponse(BulkShardResponse bulkShardResponse) { + for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { + // we may have no response if item failed + if (bulkItemResponse.getResponse() != null) { + bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + docStatusStats.inc(bulkItemResponse.status()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } - @Override - public void onFailure(Exception e) { - // create failures for all relevant requests - for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - final DocWriteRequest docWriteRequest = request.request(); - final BulkItemResponse bulkItemResponse = new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } - docStatusStats.inc(bulkItemResponse.status()); - responses.set(request.id(), bulkItemResponse); - } + @Override + public void onFailure(Exception e) { + // create failures for all relevant requests + for (BulkItemRequest request : requests) { + final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + final DocWriteRequest docWriteRequest = request.request(); + final BulkItemResponse bulkItemResponse = new BulkItemResponse( + request.id(), + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) + ); + + 
docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); + } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } - private void finishHim() { - indicesService.addDocStatusStats(docStatusStats); - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - } - }, releasable::close)); + private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); + listener.onResponse( + new BulkResponse( + responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos) + ) + ); + } + }, releasable::close), span, tracer) + ); + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } } bulkRequest = null; // allow memory for bulk request items to be reclaimed before all items have been completed } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index fddda0ef1f9a7..268a6ed6f85b8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -99,6 +99,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportChannel; @@ -161,7 +162,8 @@ public TransportShardBulkAction( IndexingPressureService indexingPressureService, SegmentReplicationPressureService segmentReplicationPressureService, RemoteStorePressureService remoteStorePressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -177,7 +179,8 @@ public TransportShardBulkAction( EXECUTOR_NAME_FUNCTION, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index 81ae3d78e8ced..9a9e861ad8055 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -205,7 +205,7 @@ public long ifSeqNo() { /** * If set, only perform this delete request if the document was last modification was assigned this primary term. - * + *
<p>
              * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java index 039214459ac21..6cbabfec6d763 100644 --- a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java @@ -40,7 +40,7 @@ /** * Performs the delete operation. - * + *
<p>
              * Deprecated use TransportBulkAction with a single item instead * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index d0b78cfd49d4b..c7926e65cf792 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -76,14 +76,14 @@ /** * Index request to index a typed JSON document into a specific index and make it searchable. Best * created using {@link org.opensearch.client.Requests#indexRequest(String)}. - * + *
<p>
              * The index requires the {@link #index()}, {@link #id(String)} and * {@link #source(byte[], MediaType)} to be set. - * + *
<p>
              * The source (content to index) can be set in its bytes form using ({@link #source(byte[], MediaType)}), * its string form ({@link #source(String, MediaType)}) or using a {@link XContentBuilder} * ({@link #source(XContentBuilder)}). - * + *
<p>
              * If the {@link #id(String)} is not set, it will be automatically generated. * * @see IndexResponse @@ -388,7 +388,7 @@ public IndexRequest source(Map source, MediaType contentType) throws /** * Sets the document source to index. - * + *
<p>
              * Note, its preferable to either set it using {@link #source(XContentBuilder)} * or using the {@link #source(byte[], MediaType)}. */ @@ -591,7 +591,7 @@ public long ifSeqNo() { /** * If set, only perform this indexing request if the document was last modification was assigned this primary term. - * + *
<p>
              * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java index fe4f80bf0c065..ce32840f6751b 100644 --- a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java @@ -40,7 +40,7 @@ /** * Performs the index operation. - * + *
<p>
              * Allows for the following settings: *
<ul>
                *
              • autoCreateIndex: When set to {@code true}, will automatically create an index if one does not exists. diff --git a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java index 9927affbc7442..2821f4fd7fadb 100644 --- a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java +++ b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java @@ -46,7 +46,7 @@ /** * A utility for forwarding ingest requests to ingest nodes in a round-robin fashion. - * + *
<p>
                * TODO: move this into IngestService and make index/bulk actions call that * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java index 2234934499609..ec3ee981b646f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java @@ -218,7 +218,12 @@ private static List parseDocs(Map config) { String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, Metadata.ROUTING.getFieldName()); Long version = null; if (dataMap.containsKey(Metadata.VERSION.getFieldName())) { - version = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.VERSION.getFieldName()); + Object versionFieldValue = ConfigurationUtils.readObject(null, null, dataMap, Metadata.VERSION.getFieldName()); + if (versionFieldValue instanceof Integer || versionFieldValue instanceof Long) { + version = ((Number) versionFieldValue).longValue(); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_version], only int or long is accepted"); + } } VersionType versionType = null; if (dataMap.containsKey(Metadata.VERSION_TYPE.getFieldName())) { @@ -228,12 +233,25 @@ private static List parseDocs(Map config) { } IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, document); if (dataMap.containsKey(Metadata.IF_SEQ_NO.getFieldName())) { - Long ifSeqNo = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); - ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ifSeqNo); + Object ifSeqNoFieldValue = ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); + if (ifSeqNoFieldValue instanceof Integer || ifSeqNoFieldValue instanceof Long) { + ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ((Number) ifSeqNoFieldValue).longValue()); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_if_seq_no], only int or long is accepted"); + } } if (dataMap.containsKey(Metadata.IF_PRIMARY_TERM.getFieldName())) { - Long ifPrimaryTerm = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_PRIMARY_TERM.getFieldName()); - ingestDocument.setFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), ifPrimaryTerm); + Object ifPrimaryTermFieldValue = ConfigurationUtils.readObject( + null, + null, + dataMap, + Metadata.IF_PRIMARY_TERM.getFieldName() + ); + if (ifPrimaryTermFieldValue instanceof Integer || ifPrimaryTermFieldValue instanceof Long) { + ingestDocument.setFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), ((Number) ifPrimaryTermFieldValue).longValue()); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_if_primary_term], only int or long is accepted"); + } } ingestDocumentList.add(ingestDocument); } diff --git a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java index 032fe83e2220b..9d60706d1f100 100644 --- a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java @@ -54,6 +54,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import 
org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportException; @@ -93,7 +94,8 @@ public TransportResyncReplicationAction( ShardStateAction shardStateAction, ActionFilters actionFilters, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -109,7 +111,8 @@ public TransportResyncReplicationAction( EXECUTOR_NAME_FUNCTION, true, /* we should never reject resync because of thread pool capacity on primary */ indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 1c0a1280ad550..14f57218ae1dc 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -215,6 +215,7 @@ public final void start() { 0, 0, buildTookInMillis(), + timeProvider.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null @@ -662,6 +663,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), + timeProvider.getPhaseTook(), failures, clusters, searchContextId diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 1d11ad2760675..87eb27bdb8255 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -98,18 +98,18 @@ public void executeCreatePit( task.getParentTaskId(), Collections.emptyMap() ); - /** - * This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is - * not supported for point in time + /* + This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is + not supported for point in time */ searchRequest.setCcsMinimizeRoundtrips(false); - /** - * Phase 1 of create PIT + /* + Phase 1 of create PIT */ executeCreatePit(searchTask, searchRequest, createPitListener); - /** - * Phase 2 of create PIT where we update pit id in pit contexts + /* + Phase 2 of create PIT where we update pit id in pit contexts */ createPitListener.whenComplete( searchResponse -> { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); }, @@ -167,9 +167,9 @@ void executeUpdatePitId( searchResponse.pointInTimeId() ) ); - /** - * store the create time ( same create time for all PIT contexts across shards ) to be used - * for list PIT api + /* + store the create time ( same create time for all PIT contexts across shards ) to be used + for list PIT api */ final long relativeStartNanos = System.nanoTime(); final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index da8f8f144eaf2..00e0345062d1c 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -277,6 
+277,8 @@ public static void readMultiLineFormat( } else if ("cancel_after_time_interval".equals(entry.getKey()) || "cancelAfterTimeInterval".equals(entry.getKey())) { searchRequest.setCancelAfterTimeInterval(nodeTimeValue(value, null)); + } else if ("phase_took".equals(entry.getKey())) { + searchRequest.setPhaseTook(nodeBooleanValue(value)); } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } @@ -374,6 +376,9 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild if (request.getCancelAfterTimeInterval() != null) { xContentBuilder.field("cancel_after_time_interval", request.getCancelAfterTimeInterval().getStringRep()); } + if (request.isPhaseTook() != null) { + xContentBuilder.field("phase_took", request.isPhaseTook()); + } xContentBuilder.endObject(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index cca85f92d2676..161a103cdf36a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -162,7 +162,7 @@ public AggregatedDfs aggregateDfs(Collection results) { * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. - * + *
<p>
                * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate * the result. In oder to obtain stable results the shard index (index of the result in the result array) must be the same. * @@ -284,7 +284,7 @@ public List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { /** * Enriches search hits and completion suggestion hits from sortedDocs using fetchResultsArr, * merges suggestions, aggregations and profile results - * + *
<p>
                * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named * completion suggestion ordered by suggestion name */ diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java new file mode 100644 index 0000000000000..9cbe2d2ffcb7d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryShapeVisitor; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.List; +import java.util.ListIterator; + +/** + * Class to categorize the search queries based on the type and increment the relevant counters. + * Class also logs the query shape. + */ +final class SearchQueryCategorizer { + + private static final Logger log = LogManager.getLogger(SearchQueryCategorizer.class); + + final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { + searchQueryCounters = new SearchQueryCounters(metricsRegistry); + } + + public void categorize(SearchSourceBuilder source) { + QueryBuilder topLevelQueryBuilder = source.query(); + + logQueryShape(topLevelQueryBuilder); + incrementQueryTypeCounters(topLevelQueryBuilder); + incrementQueryAggregationCounters(source.aggregations()); + incrementQuerySortCounters(source.sorts()); + } + + private void incrementQuerySortCounters(List> sorts) { + if (sorts != null && sorts.size() > 0) { + for (ListIterator> it = sorts.listIterator(); it.hasNext();) { + SortBuilder sortBuilder = it.next(); + String sortOrder = sortBuilder.order().toString(); + searchQueryCounters.sortCounter.add(1, Tags.create().addTag("sort_order", sortOrder)); + } + } + } + + private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { + if (aggregations != null) { + searchQueryCounters.aggCounter.add(1); + } + } + + private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryBuilderVisitor searchQueryVisitor = new SearchQueryCategorizingVisitor(searchQueryCounters); + topLevelQueryBuilder.visit(searchQueryVisitor); + } + + private void logQueryShape(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + topLevelQueryBuilder.visit(shapeVisitor); + log.debug("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java new file mode 100644 index 0000000000000..98f0169e69a5c --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Class to visit the querybuilder tree and also track the level information. + * Increments the counters related to Search Query type. + */ +final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { + private static final String LEVEL_TAG = "level"; + private final int level; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizingVisitor(SearchQueryCounters searchQueryCounters) { + this(searchQueryCounters, 0); + } + + private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) { + this.searchQueryCounters = counters; + this.level = level; + } + + public void accept(QueryBuilder qb) { + if (qb instanceof BoolQueryBuilder) { + searchQueryCounters.boolCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof FunctionScoreQueryBuilder) { + searchQueryCounters.functionScoreCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchQueryBuilder) { + searchQueryCounters.matchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchPhraseQueryBuilder) { + searchQueryCounters.matchPhrasePrefixCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MultiMatchQueryBuilder) { + searchQueryCounters.multiMatchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof QueryStringQueryBuilder) { + searchQueryCounters.queryStringQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RangeQueryBuilder) { + searchQueryCounters.rangeCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RegexpQueryBuilder) { + searchQueryCounters.regexCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof TermQueryBuilder) { + searchQueryCounters.termCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof WildcardQueryBuilder) { + searchQueryCounters.wildcardCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else { + searchQueryCounters.otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } + } + + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return new SearchQueryCategorizingVisitor(searchQueryCounters, level + 1); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java 
b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java new file mode 100644 index 0000000000000..7e0259af07701 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +/** + * Class contains all the Counters related to search query types. + */ +final class SearchQueryCounters { + private static final String UNIT = "1"; + private final MetricsRegistry metricsRegistry; + + // Counters related to Query types + public final Counter aggCounter; + public final Counter boolCounter; + public final Counter functionScoreCounter; + public final Counter matchCounter; + public final Counter matchPhrasePrefixCounter; + public final Counter multiMatchCounter; + public final Counter otherQueryCounter; + public final Counter queryStringQueryCounter; + public final Counter rangeCounter; + public final Counter regexCounter; + + public final Counter sortCounter; + public final Counter skippedCounter; + public final Counter termCounter; + public final Counter totalCounter; + public final Counter wildcardCounter; + + public SearchQueryCounters(MetricsRegistry metricsRegistry) { + this.metricsRegistry = metricsRegistry; + this.aggCounter = metricsRegistry.createCounter( + "search.query.type.agg.count", + "Counter for the number of top level agg search queries", + UNIT + ); + this.boolCounter = metricsRegistry.createCounter( + "search.query.type.bool.count", + "Counter for the number of top level and nested bool search queries", + UNIT + ); + this.functionScoreCounter = metricsRegistry.createCounter( + "search.query.type.functionscore.count", + "Counter for the number of top level and nested function score search queries", + UNIT + ); + this.matchCounter = metricsRegistry.createCounter( + "search.query.type.match.count", + "Counter for the number of top level and nested match search queries", + UNIT + ); + this.matchPhrasePrefixCounter = metricsRegistry.createCounter( + "search.query.type.matchphrase.count", + "Counter for the number of top level and nested match phrase prefix search queries", + UNIT + ); + this.multiMatchCounter = metricsRegistry.createCounter( + "search.query.type.multimatch.count", + "Counter for the number of top level and nested multi match search queries", + UNIT + ); + this.otherQueryCounter = metricsRegistry.createCounter( + "search.query.type.other.count", + "Counter for the number of top level and nested search queries that do not match any other categories", + UNIT + ); + this.queryStringQueryCounter = metricsRegistry.createCounter( + "search.query.type.querystringquery.count", + "Counter for the number of top level and nested queryStringQuery search queries", + UNIT + ); + this.rangeCounter = metricsRegistry.createCounter( + "search.query.type.range.count", + "Counter for the number of top level and nested range search queries", + UNIT + ); + this.regexCounter = metricsRegistry.createCounter( + "search.query.type.regex.count", + "Counter for the number of top level and nested regex search queries", + UNIT + ); + this.skippedCounter = metricsRegistry.createCounter( + "search.query.type.skipped.count", + "Counter for the number queries skipped due to 
error", + UNIT + ); + this.sortCounter = metricsRegistry.createCounter( + "search.query.type.sort.count", + "Counter for the number of top level sort search queries", + UNIT + ); + this.termCounter = metricsRegistry.createCounter( + "search.query.type.term.count", + "Counter for the number of top level and nested term search queries", + UNIT + ); + this.totalCounter = metricsRegistry.createCounter( + "search.query.type.total.count", + "Counter for the number of top level and nested search queries", + UNIT + ); + this.wildcardCounter = metricsRegistry.createCounter( + "search.query.type.wildcard.count", + "Counter for the number of top level and nested wildcard search queries", + UNIT + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 762022460ebdb..fb026dae630b7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -117,6 +117,8 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private String pipeline; + private Boolean phaseTook = null; + public SearchRequest() { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; @@ -209,6 +211,7 @@ private SearchRequest( this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; this.cancelAfterTimeInterval = searchRequest.cancelAfterTimeInterval; + this.phaseTook = searchRequest.phaseTook; } /** @@ -253,6 +256,9 @@ public SearchRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { pipeline = in.readOptionalString(); } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + phaseTook = in.readOptionalBoolean(); + } } @Override @@ -284,6 +290,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeOptionalString(pipeline); } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalBoolean(phaseTook); + } } @Override @@ -600,7 +609,7 @@ public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard * bounds and the query are disjoint. - * + *
<p>
                * When unspecified, the pre-filter phase is executed if any of these conditions is met: *
<ul>
                  *
                • The request targets more than 128 shards
                • @@ -615,13 +624,27 @@ public void setPreFilterShardSize(int preFilterShardSize) { this.preFilterShardSize = preFilterShardSize; } + /** + * Returns value of user-provided phase_took query parameter for this search request. + */ + public Boolean isPhaseTook() { + return phaseTook; + } + + /** + * Sets value of phase_took query param if provided by user. Defaults to null. + */ + public void setPhaseTook(Boolean phaseTook) { + this.phaseTook = phaseTook; + } + /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold, or null if the threshold is unspecified. * This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard * bounds and the query are disjoint. - * + *
<p>
                  * When unspecified, the pre-filter phase is executed if any of these conditions is met: *
<ul>
                    *
                  • The request targets more than 128 shards
                  • @@ -719,7 +742,8 @@ public boolean equals(Object o) { && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) - && Objects.equals(pipeline, that.pipeline); + && Objects.equals(pipeline, that.pipeline) + && Objects.equals(phaseTook, that.phaseTook); } @Override @@ -740,7 +764,8 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - cancelAfterTimeInterval + cancelAfterTimeInterval, + phaseTook ); } @@ -783,6 +808,8 @@ public String toString() { + cancelAfterTimeInterval + ", pipeline=" + pipeline + + ", phaseTook=" + + phaseTook + "}"; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index 861e1df0203d7..bc43b65e5d844 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -605,7 +605,7 @@ public SearchRequestBuilder setMaxConcurrentShardRequests(int maxConcurrentShard * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard * bounds and the query are disjoint. - * + *
<p>
                    * When unspecified, the pre-filter phase is executed if any of these conditions is met: *
<ul>
                      *
                    • The request targets more than 128 shards
                    • diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index a546311a1f668..96d07982d03db 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; @@ -63,7 +64,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Supplier; @@ -94,6 +97,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final ShardSearchFailure[] shardFailures; private final Clusters clusters; private final long tookInMillis; + private final PhaseTook phaseTook; public SearchResponse(StreamInput in) throws IOException { super(in); @@ -112,6 +116,11 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + phaseTook = in.readOptionalWriteable(PhaseTook::new); + } else { + phaseTook = null; + } skippedShards = in.readVInt(); pointInTimeId = in.readOptionalString(); } @@ -126,7 +135,32 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, null, shardFailures, clusters, null); + } + + public SearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + internalResponse, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + null, + shardFailures, + clusters, + pointInTimeId + ); } public SearchResponse( @@ -136,6 +170,7 @@ public SearchResponse( int successfulShards, int skippedShards, long tookInMillis, + PhaseTook phaseTook, ShardSearchFailure[] shardFailures, Clusters clusters, String pointInTimeId @@ -148,6 +183,7 @@ public SearchResponse( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.tookInMillis = tookInMillis; + this.phaseTook = phaseTook; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; assert scrollId == null || pointInTimeId == null : "SearchResponse can't have both scrollId [" @@ -210,6 +246,13 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } + /** + * How long the request took in each search phase. + */ + public PhaseTook getPhaseTook() { + return phaseTook; + } + /** * The total number of shards the search was executed on. 
*/ @@ -298,6 +341,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); } builder.field(TOOK.getPreferredName(), tookInMillis); + if (phaseTook != null) { + phaseTook.toXContent(builder, params); + } builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); @@ -337,6 +383,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE Boolean terminatedEarly = null; int numReducePhases = 1; long tookInMillis = -1; + PhaseTook phaseTook = null; int successfulShards = -1; int totalShards = -1; int skippedShards = 0; // 0 for BWC @@ -401,6 +448,24 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE parser.skipChildren(); } } + } else if (PhaseTook.PHASE_TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + Map phaseTookMap = new HashMap<>(); + + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + try { + SearchPhaseName.valueOf(currentFieldName.toUpperCase(Locale.ROOT)); + phaseTookMap.put(currentFieldName, parser.longValue()); + } catch (final IllegalArgumentException ex) { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + phaseTook = new PhaseTook(phaseTookMap); } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; @@ -472,6 +537,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE successfulShards, skippedShards, tookInMillis, + phaseTook, failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId @@ -491,6 +557,9 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(phaseTook); + } out.writeVInt(skippedShards); out.writeOptionalString(pointInTimeId); } @@ -604,6 +673,67 @@ public String toString() { } } + /** + * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful + * and how many of them were skipped. 
+ * + * @opensearch.internal + */ + public static class PhaseTook implements ToXContentFragment, Writeable { + static final ParseField PHASE_TOOK = new ParseField("phase_took"); + private final Map phaseTookMap; + + public PhaseTook(Map phaseTookMap) { + this.phaseTookMap = phaseTookMap; + } + + private PhaseTook(StreamInput in) throws IOException { + this(in.readMap(StreamInput::readString, StreamInput::readLong)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PHASE_TOOK.getPreferredName()); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + if (phaseTookMap.containsKey(searchPhaseName.getName())) { + builder.field(searchPhaseName.getName(), phaseTookMap.get(searchPhaseName.getName())); + } else { + builder.field(searchPhaseName.getName(), 0); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PhaseTook phaseTook = (PhaseTook) o; + + if (phaseTook.phaseTookMap.equals(phaseTookMap)) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(phaseTookMap); + } + } + static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index f90e98106f93f..054bd578cc56c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -236,6 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { successfulShards, skippedShards, tookInMillis, + searchTimeProvider.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index 2e447abd125c5..579a01f0dd932 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -53,7 +53,7 @@ /** * Base class that holds the various sections which a search response is * composed of (hits, aggs, suggestions etc.) and allows to retrieve them. - * + *
<p>
                      * The reason why this class exists is that the high level REST client uses its own classes * to parse aggregations into, which are not serializable. This is the common part that can be * shared between core and client. diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index 614d576324026..b15a4b66e8870 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -76,7 +76,7 @@ private void deletePits(ActionListener listener, DeletePitReq /** * Delete all active PIT reader contexts leveraging list all PITs - * + *
<p>
                      * For Cross cluster PITs : * - mixed cluster PITs ( PIT comprising local and remote ) will be fully deleted. Since there will atleast be * one reader context with PIT ID present in local cluster, 'Get all PITs' will retrieve the PIT ID with which diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index cff1005beff27..a6fb8453af4ff 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -88,6 +88,7 @@ import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -98,6 +99,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -136,6 +138,13 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_QUERY_METRICS_ENABLED_SETTING = Setting.boolSetting( + "search.query.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + // cluster level setting for timeout based search cancellation. If search request level parameter is present then that will take // precedence over the cluster setting value public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; @@ -154,6 +163,14 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( + SEARCH_PHASE_TOOK_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -168,8 +185,14 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -202,6 +226,17 @@ public TransportSearchAction( this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); this.searchRequestStats = searchRequestStats; + this.metricsRegistry = metricsRegistry; + this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + } + + private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { + this.searchQueryMetricsEnabled = searchQueryMetricsEnabled; + if ((this.searchQueryMetricsEnabled == true) && this.searchQueryCategorizer == null) { + this.searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } } private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { @@ -252,6 +287,8 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust } /** + * Listener to track request-level tookTime and phase tookTimes from the coordinator. + * * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving * "now" to an index name). 
Another clock is needed for measuring how long a search operation * took. These two uses are at odds with each other. There are many issues with using a real @@ -261,11 +298,12 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider { + static final class SearchTimeProvider implements SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; + private boolean phaseTook = false; /** * Instantiates a new search time provider. The absolute start time is the real clock time @@ -291,6 +329,47 @@ long getAbsoluteStartMillis() { long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } + + public void setPhaseTook(boolean phaseTook) { + this.phaseTook = phaseTook; + } + + public boolean isPhaseTook() { + return phaseTook; + } + + SearchResponse.PhaseTook getPhaseTook() { + if (phaseTook) { + Map phaseTookMap = new HashMap<>(); + // Convert Map to Map for SearchResponse() + for (SearchPhaseName searchPhaseName : phaseStatsMap.keySet()) { + phaseTookMap.put(searchPhaseName.getName(), phaseStatsMap.get(searchPhaseName)); + } + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + + Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + phaseStatsMap.put( + context.getCurrentPhase().getSearchPhaseName(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) + ); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName); + } } @Override @@ -332,13 +411,6 @@ public void executeRequest( SinglePhaseSearchAction phaseSearchAction, ActionListener listener ) { - final List searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } executeRequest(task, searchRequest, new SearchAsyncActionProvider() { @Override public AbstractSearchAsyncAction asyncSearchAction( @@ -355,7 +427,8 @@ public AbstractSearchAsyncAction asyncSearchAction( ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { return new AbstractSearchAsyncAction( actionName, @@ -419,6 +492,16 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); + + final List searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); + + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } + PipelinedRequest searchRequest; ActionListener listener; try { @@ -432,6 +515,14 @@ private void executeRequest( return; } + if 
(searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(searchRequest.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + ActionListener rewriteListener = ActionListener.wrap(source -> { if (source != searchRequest.source()) { // only set it if it changed - we don't allow null values to be set but it might be already null. this way we catch @@ -462,7 +553,8 @@ private void executeRequest( clusterState, listener, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -483,7 +575,8 @@ private void executeRequest( clusterState, l, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ) ); } else { @@ -533,7 +626,8 @@ private void executeRequest( listener, new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); }, listener::onFailure) ); @@ -622,6 +716,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), + timeProvider.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -811,7 +906,8 @@ private void executeLocalSearch( ClusterState clusterState, ActionListener listener, SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { executeSearch( (SearchTask) task, @@ -825,7 +921,8 @@ private void executeLocalSearch( listener, SearchResponse.Clusters.EMPTY, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } @@ -943,7 +1040,8 @@ private void executeSearch( ActionListener listener, SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name @@ -1044,7 +1142,8 @@ private void executeSearch( listener, preFilterSearchShards, threadPool, - clusters + clusters, + searchRequestOperationsListener ).start(); } @@ -1127,15 +1226,30 @@ AbstractSearchAsyncAction asyncSearchAction( ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ); } - private List createSearchListenerList() { + private List createSearchListenerList(SearchRequest searchRequest, SearchTimeProvider timeProvider) { final List searchListenersList = new ArrayList<>(); + if (isRequestStatsEnabled) { searchListenersList.add(searchRequestStats); } + + // phase_took is enabled with request param and/or cluster setting + Boolean phaseTookRequestParam = searchRequest.isPhaseTook(); + if (phaseTookRequestParam == null) { // check cluster setting only when request param is undefined + if 
(clusterService.getClusterSettings().get(TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED)) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + } else if (phaseTookRequestParam == true) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + return searchListenersList; } @@ -1153,15 +1267,9 @@ private AbstractSearchAsyncAction searchAsyncAction ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { - final List searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } if (preFilter) { return new CanMatchPreFilterSearchPhase( logger, @@ -1192,7 +1300,8 @@ private AbstractSearchAsyncAction searchAsyncAction listener, false, threadPool, - clusters + clusters, + searchRequestOperationsListener ); return new SearchPhase(action.getName()) { @Override diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index daa11c2d7d80f..72aae210d61ae 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -81,7 +81,7 @@ private Releasable registerChildNode(TaskId parentTask) { /** * Use this method when the transport action call should result in creation of a new task associated with the call. - * + *
<p>
                      * This is a typical behavior. */ public final Task execute(Request request, ActionListener listener) { diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java index f5fb41dc5bae3..4d54ce51c923c 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java @@ -53,9 +53,9 @@ public abstract class BaseNodesRequest * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes} * will be populated. Note that if {@link #concreteNodes} is not null, it will be used and nodeIds * will be ignored. - * + *
<p>
                      * See {@link DiscoveryNodes#resolveNodes} for a full description of the options. - * + *
<p>
                      * TODO: get rid of this and resolve it to concrete nodes in the rest layer **/ private String[] nodesIds; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index 60c490a50575a..9f69d41d83f5b 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -479,7 +479,7 @@ public interface Primary< /** * Notifies the primary of a local checkpoint for the given allocation. - * + *
<p>
                      * Note: The primary will use this information to advance the global checkpoint if possible. * * @param allocationId allocation ID of the shard corresponding to the supplied local checkpoint diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index de5a92fdcc4b1..ddebdc5530e70 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -100,7 +100,7 @@ /** * Base class for requests that should be executed on a primary copy followed by replica copies. * Subclasses can resolve the target shard and provide implementation for primary and replica operations. - * + *
<p>
                      * The action samples cluster state on the receiving node to reroute to node with primary copy and on the * primary node to validate request before primary operation followed by sampling state again for resolving * nodes with replica copies to perform replication. @@ -134,6 +134,12 @@ public abstract class TransportReplicationAction< Setting.Property.NodeScope ); + /** + * Making primary and replica actions suffixes as constant + */ + public static final String PRIMARY_ACTION_SUFFIX = "[p]"; + public static final String REPLICA_ACTION_SUFFIX = "[r]"; + protected final ThreadPool threadPool; protected final TransportService transportService; protected final ClusterService clusterService; @@ -204,8 +210,8 @@ protected TransportReplicationAction( this.shardStateAction = shardStateAction; this.executor = executor; - this.transportPrimaryAction = actionName + "[p]"; - this.transportReplicaAction = actionName + "[r]"; + this.transportPrimaryAction = actionName + PRIMARY_ACTION_SUFFIX; + this.transportReplicaAction = actionName + REPLICA_ACTION_SUFFIX; this.initialRetryBackoffBound = REPLICATION_INITIAL_RETRY_BACKOFF_BOUND.get(settings); this.retryTimeout = REPLICATION_RETRY_TIMEOUT.get(settings); @@ -866,7 +872,7 @@ protected IndexShard getIndexShard(final ShardId shardId) { * Responsible for routing and retrying failed operations on the primary. * The actual primary operation is done in {@link ReplicationOperation} on the * node with primary copy. - * + *
<p>
                      * Resolves index and shard id for the request before routing it to target node * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 62cbfbde9780a..9ebfa8cfd0df8 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -59,6 +59,11 @@ import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,6 +87,7 @@ public abstract class TransportWriteAction< protected final SystemIndices systemIndices; private final Function executorFunction; + private final Tracer tracer; protected TransportWriteAction( Settings settings, @@ -97,7 +103,8 @@ protected TransportWriteAction( Function executorFunction, boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class. @@ -119,6 +126,7 @@ protected TransportWriteAction( this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; this.systemIndices = systemIndices; + this.tracer = tracer; } protected String executor(IndexShard shard) { @@ -220,7 +228,12 @@ protected void shardOperationOnPrimary( threadPool.executor(executor).execute(new ActionRunnable>(listener) { @Override protected void doRun() { - dispatchedShardOperationOnPrimary(request, primary, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnPrimary", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnPrimary(request, primary, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -248,7 +261,12 @@ protected void shardOperationOnReplica(ReplicaRequest request, IndexShard replic threadPool.executor(executorFunction.apply(replica)).execute(new ActionRunnable(listener) { @Override protected void doRun() { - dispatchedShardOperationOnReplica(request, replica, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnReplica", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnReplica(request, replica, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -266,7 +284,7 @@ protected abstract void dispatchedShardOperationOnReplica( /** * Result of taking the action on the primary. - * + *
<p>
                      * NOTE: public for testing * * @opensearch.internal @@ -496,7 +514,7 @@ void run() { * A proxy for write operations that need to be performed on the * replicas, where a failure to execute the operation should fail * the replica shard and/or mark the replica as stale. - * + *
<p>
                      * This extends {@code TransportReplicationAction.ReplicasProxy} to do the * failing and stale-ing. * diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java index c474096ff94e4..56b34aea8248d 100644 --- a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java +++ b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java @@ -55,7 +55,7 @@ public abstract class SingleShardRequest * Whether index property is optional depends on the concrete implementation. If index property is required the * concrete implementation should use {@link #validateNonNullIndex()} to check if the index property has been set */ diff --git a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java index a10f74f7c2aa8..c2ae333b17055 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java @@ -128,7 +128,7 @@ public final Request setNodes(String... nodes) { /** * Returns the id of the task that should be processed. - * + *
<p>
                      * By default tasks with any ids are returned. */ public TaskId getTaskId() { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index b354482871521..946c3c2446173 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -589,7 +589,7 @@ public long ifSeqNo() { /** * If set, only perform this update request if the document was last modification was assigned this primary term. - * + *
<p>
                      * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index f9661e71d60e6..e43c42446de2c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -73,7 +73,7 @@ /** * We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code - * opensearch.enforce.bootstrap.checks} is set to {@true}. In this case we assume the node is running in production and + * opensearch.enforce.bootstrap.checks} is set to {@code true}. In this case we assume the node is running in production and * all bootstrap checks must pass. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java index 8e556df4b2f9b..91da34fb7216d 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java @@ -141,7 +141,7 @@ public boolean callback(long dwCtrlType) { /** * Memory protection constraints - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx */ public static final int PAGE_NOACCESS = 0x0001; @@ -151,7 +151,7 @@ public boolean callback(long dwCtrlType) { /** * Contains information about a range of pages in the virtual address space of a process. * The VirtualQuery and VirtualQueryEx functions use this structure. - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx */ public static class MemoryBasicInformation extends Structure { @@ -186,7 +186,7 @@ public SizeT() { /** * Locks the specified region of the process's virtual address space into physical * memory, ensuring that subsequent access to the region will not incur a page fault. - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx * * @param address A pointer to the base address of the region of pages to be locked. @@ -197,7 +197,7 @@ public SizeT() { /** * Retrieves information about a range of pages within the virtual address space of a specified process. - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx * * @param handle A handle to the process whose memory information is queried. @@ -210,7 +210,7 @@ public SizeT() { /** * Sets the minimum and maximum working set sizes for the specified process. - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx * * @param handle A handle to the process whose working set sizes is to be set. @@ -222,7 +222,7 @@ public SizeT() { /** * Retrieves a pseudo handle for the current process. - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx * * @return a pseudo handle to the current process. @@ -231,7 +231,7 @@ public SizeT() { /** * Closes an open object handle. - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx * * @param handle A valid handle to an open object. @@ -252,7 +252,7 @@ public SizeT() { /** * Creates or opens a new job object - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx * * @param jobAttributes security attributes @@ -263,7 +263,7 @@ public SizeT() { /** * Associates a process with an existing job - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx * * @param job job handle @@ -274,7 +274,7 @@ public SizeT() { /** * Basic limit information for a job object - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx */ public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference { @@ -316,7 +316,7 @@ protected List getFieldOrder() { /** * Get job limit and state information - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx * * @param job job handle @@ -330,7 +330,7 @@ protected List getFieldOrder() { /** * Set job limit and state information - * + *
<p>
                      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx * * @param job job handle diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index ab52ae5a43a2a..4d36efff0e192 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -188,9 +188,9 @@ void init(final boolean daemonize, final Path pidFile, final boolean quiet, Envi /** * Required method that's called by Apache Commons procrun when * running as a service on Windows, when the service is stopped. - * + *
<p>
                      * http://commons.apache.org/proper/commons-daemon/procrun.html - * + *
<p>
                      * NOTE: If this method is renamed and/or moved, make sure to * update opensearch-service.bat! */ diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java index 482087be1c8eb..28e1e7c53cb9c 100644 --- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java @@ -44,7 +44,7 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener /** * Called to determine which nodes the acknowledgement is expected from. - * + *
<p>
                      * As this method will be called multiple times to determine the set of acking nodes, * it is crucial for it to return consistent results: Given the same listener instance * and the same node parameter, the method implementation should return the same result. diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 50beeb1f03deb..bf8494cc36857 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -70,7 +70,7 @@ default boolean runOnlyOnMaster() { /** * Callback invoked after new cluster state is published. Note that * this method is not invoked if the cluster state was not updated. - * + *
<p>
                      * Note that this method will be executed using system context. * * @param clusterChangedEvent the change event for this cluster state change, containing @@ -80,7 +80,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {} /** * Builds a concise description of a list of tasks (to be used in logging etc.). - * + *
<p>
                      * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}. * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. * This allows groupd task description but the submitting source. diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java index dd2232968114e..a38fc81bebc08 100644 --- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java @@ -182,7 +182,7 @@ public Map apply(Map map) { /** * Represents differences between two maps of objects and is used as base class for different map implementations. - * + *
<p>
                      * Implements serialization. How differences are applied is left to subclasses. * * @param the type of map keys @@ -381,9 +381,9 @@ public Integer readKey(StreamInput in) throws IOException { /** * Provides read and write operations to serialize map values. * Reading of values can be made dependent on map key. - * + *
<p>
                      * Also provides operations to distinguish whether map values are diffable. - * + *
<p>
                      * Should not be directly implemented, instead implement either * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}. * @@ -517,7 +517,7 @@ public Diff readDiff(StreamInput in, K key) throws IOException { /** * Implementation of the ValueSerializer that wraps value and diff readers. - * + *
<p>
                      * Note: this implementation is ignoring the key. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 35490d2f37a49..e381b8f244bf3 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -82,7 +82,7 @@ * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node. * Listens for changes in the number of data nodes and immediately submits a * ClusterInfoUpdateJob if a node has been added. - * + *
<p>
                      * Every time the timer runs, gathers information about the disk usage and * shard sizes across the cluster. * diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index 042a4743ca25d..3e0c78099e6b4 100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -189,7 +189,7 @@ public Snapshot snapshot() { /** * Returns list of shards that being restore and their status * - * @return list of shards + * @return map of shard id to shard restore status */ public Map shards() { return this.shards; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 39d05e672f977..3a506397bcac8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -46,11 +46,11 @@ public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish * process should apply this state to the cluster-manager as well! - * + *
<p>
                      * The publishListener allows to wait for the publication to complete, which can be either successful completion, timing out or failing. * The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not * committed and should be rejected. Any other exception signals that something bad happened but the change is committed. - * + *
<p>
                      * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether * they updated their own cluster state or not. */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index a339852e6ed8d..987a3e3ffa7d3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -638,6 +638,12 @@ public interface PersistedState extends Closeable { */ void setLastAcceptedState(ClusterState clusterState); + /** + * Returns the stats for the persistence layer for {@link CoordinationState}. + * @return PersistedStateStats + */ + PersistedStateStats getStats(); + /** * Marks the last accepted cluster state as committed. * After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index eb30460ca1b7f..a4ffab7fb70c9 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -56,6 +56,7 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -865,7 +866,16 @@ protected void doStart() { @Override public DiscoveryStats stats() { - return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats()); + ClusterStateStats clusterStateStats = clusterManagerService.getClusterStateStats(); + ArrayList stats = new ArrayList<>(); + Stream.of(PersistedStateRegistry.PersistedStateType.values()).forEach(stateType -> { + if (persistedStateRegistry.getPersistedState(stateType) != null + && persistedStateRegistry.getPersistedState(stateType).getStats() != null) { + stats.add(persistedStateRegistry.getPersistedState(stateType).getStats()); + } + }); + clusterStateStats.setPersistenceStats(stats); + return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats(), clusterStateStats); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java index 67ef82ee7b2e9..b77ede5471534 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java @@ -65,6 +65,11 @@ public void setLastAcceptedState(ClusterState clusterState) { this.acceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public long getCurrentTerm() { return currentTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 15eaf9c8bcc1e..f701a2f52277d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ 
b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -483,10 +483,10 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) * 2. The joining node has to be a non-remote store backed if it is joining a non-remote store backed cluster. * Validates no remote store attributes are present in joining node as existing nodes in the cluster doesn't have * remote store attributes. - * + *
<p>
                      * A remote store backed node is the one which holds all the remote store attributes and a remote store backed * cluster is the one which has only homogeneous remote store backed nodes with same node attributes - * + *
<p>
                      * TODO: When we support moving from remote store cluster to non remote store and vice versa the this logic will * needs to be modified. */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java new file mode 100644 index 0000000000000..1dc20e564ade2 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Persisted cluster state related stats. + * + * @opensearch.internal + */ +public class PersistedStateStats implements Writeable, ToXContentObject { + private String statsName; + private AtomicLong totalTimeInMillis = new AtomicLong(0); + private AtomicLong failedCount = new AtomicLong(0); + private AtomicLong successCount = new AtomicLong(0); + private Map extendedFields = new HashMap<>(); // keeping minimal extensibility + + public PersistedStateStats(String statsName) { + this.statsName = statsName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(successCount.get()); + out.writeVLong(failedCount.get()); + out.writeVLong(totalTimeInMillis.get()); + if (extendedFields.size() > 0) { + out.writeBoolean(true); + out.writeVInt(extendedFields.size()); + for (Map.Entry extendedField : extendedFields.entrySet()) { + out.writeString(extendedField.getKey()); + out.writeVLong(extendedField.getValue().get()); + } + } else { + out.writeBoolean(false); + } + } + + public PersistedStateStats(StreamInput in) throws IOException { + this.successCount = new AtomicLong(in.readVLong()); + this.failedCount = new AtomicLong(in.readVLong()); + this.totalTimeInMillis = new AtomicLong(in.readVLong()); + if (in.readBoolean()) { + int extendedFieldsSize = in.readVInt(); + this.extendedFields = new HashMap<>(); + for (int fieldNumber = 0; fieldNumber < extendedFieldsSize; fieldNumber++) { + extendedFields.put(in.readString(), new AtomicLong(in.readVLong())); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(statsName); + builder.field(Fields.SUCCESS_COUNT, getSuccessCount()); + builder.field(Fields.FAILED_COUNT, getFailedCount()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getTotalTimeInMillis()); + if (extendedFields.size() > 0) { + for (Map.Entry extendedField : extendedFields.entrySet()) { + builder.field(extendedField.getKey(), extendedField.getValue().get()); + } + } + builder.endObject(); + return builder; + } + + public void stateFailed() { + failedCount.incrementAndGet(); + } + + public void stateSucceeded() { + successCount.incrementAndGet(); + } + + /** + * Expects user to send time taken in milliseconds. 
+ * + * @param timeTakenInUpload time taken in uploading the cluster state to remote + */ + public void stateTook(long timeTakenInUpload) { + totalTimeInMillis.addAndGet(timeTakenInUpload); + } + + public long getTotalTimeInMillis() { + return totalTimeInMillis.get(); + } + + public long getFailedCount() { + return failedCount.get(); + } + + public long getSuccessCount() { + return successCount.get(); + } + + protected void addToExtendedFields(String extendedField, AtomicLong extendedFieldValue) { + this.extendedFields.put(extendedField, extendedFieldValue); + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String SUCCESS_COUNT = "success_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index 1570a84ab871f..128bd42fd7947 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -63,7 +63,7 @@ public class Reconfigurator { * and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a * five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the * loss of a further node before failing. - * + *
<p>
                      * We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we * require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable, diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index d06e89d9ea170..cbc63565228f9 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -219,13 +219,13 @@ public void writeTo(final StreamOutput out) throws IOException { /** * Checks if an inactive primary shard should cause the cluster health to go RED. - * + *
<p>
                      * An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is * unavailable. In case of index creation, snapshot restore or index shrinking, which are unexceptional events in the cluster lifecycle, * cluster health should not turn RED for the time where primaries are still in the initializing state but go to YELLOW instead. * However, in case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at * some point, cluster health should still turn RED. - * + *
<p>
                      * NB: this method should *not* be called on active shards nor on non-primary shards. */ public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index 0da948dc78c5d..85a203e5e059a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -61,7 +61,7 @@ /** * A collection of tombstones for explicitly marking indices as deleted in the cluster state. - * + *
<p>
                      * The cluster state contains a list of index tombstones for indices that have been * deleted in the cluster. Because cluster states are processed asynchronously by * nodes and a node could be removed from the cluster for a period of time, the @@ -250,7 +250,7 @@ public int getNumPurged() { /** * Purge tombstone entries. Returns the number of entries that were purged. - * + *
<p>
                      * Tombstones are purged if the number of tombstones in the list * is greater than the input parameter of maximum allowed tombstones. * Tombstones are purged until the list is equal to the maximum allowed. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 52df72b342b3e..5d8b751b241e2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -779,7 +779,7 @@ public long getAliasesVersion() { /** * The term of the current selected primary. This is a non-negative number incremented when * a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary. - * + *
<p>
                      * Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard * that can be indexed into) is larger than 0. See {@link IndexMetadataUpdater#applyChanges}. **/ @@ -1884,7 +1884,7 @@ public static Settings addHumanReadableSettings(Settings settings) { /** * Return the version the index was created from the provided index settings - * + *
<p>
                      * This looks for the presence of the {@link Version} object with key {@link IndexMetadata#SETTING_VERSION_CREATED} */ public static Version indexCreated(final Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index 272bb132197af..c5efb55316b84 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -369,7 +369,7 @@ public IndexTemplateMetadata build() { /** * Serializes the template to xContent, using the legacy format where the mappings are * nested under the type name. - * + *
<p>
                      * This method is used for serializing templates before storing them in the cluster metadata, * and also in the REST layer when returning a deprecated typed response. */ @@ -386,7 +386,7 @@ public static void toXContentWithTypes( /** * Serializes the template to xContent, making sure not to nest mappings under the * type name. - * + *
<p>
                      * Note that this method should currently only be used for creating REST responses, * and not when directly updating stored templates. Index templates are still stored * in the old, typed format, and have yet to be migrated to be typeless. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 146193b8d22c4..70c1d059a1b9e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -922,19 +922,26 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 if (!metadata1.coordinationMetadata.equals(metadata2.coordinationMetadata)) { return false; } - if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { + if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { return false; } - if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { + if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { return false; } - if (!metadata1.templates.equals(metadata2.templates())) { + if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { return false; } - if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { + return isGlobalResourcesMetadataEquals(metadata1, metadata2); + } + + /** + * Compares Metadata entities persisted in Remote Store. + */ + public static boolean isGlobalResourcesMetadataEquals(Metadata metadata1, Metadata metadata2) { + if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { return false; } - if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { + if (!metadata1.templates.equals(metadata2.templates())) { return false; } // Check if any persistent metadata needs to be saved @@ -1666,7 +1673,7 @@ private SortedMap buildIndicesLookup() { /** * Validates there isn't any index with a name that would clash with the future backing indices of the existing data streams. - * + *
<p>
                      * E.g., if data stream `foo` has backing indices [`.ds-foo-000001`, `.ds-foo-000002`] and the indices lookup contains indices * `.ds-foo-000001`, `.ds-foo-000002` and `.ds-foo-000006` this will throw an IllegalStateException (as attempting to rollover the * `foo` data stream from generation 5 to 6 will not be possible) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index b2861e566dd4b..78a22fe11f072 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -754,7 +754,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( /** * Parses the provided mappings json and the inheritable mappings from the templates (if any) * into a map. - * + *
<p>
                      * The template mappings are applied in the order they are encountered in the list (clients * should make sure the lower index, closer to the head of the list, templates have the highest * {@link IndexTemplateMetadata#order()}). This merging makes no distinction between field @@ -792,7 +792,7 @@ static Map parseV1Mappings( * Validates and creates the settings for the new index based on the explicitly configured settings via the * {@link CreateIndexClusterStateUpdateRequest}, inherited from templates and, if recovering from another index (ie. split, shrink, * clone), the resize settings. - * + *
<p>
                      * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()}) * @@ -1009,7 +1009,7 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index /** * Validate and resolve the aliases explicitly set for the index, together with the ones inherited from the specified * templates. - * + *
<p>
                      * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()}) * @@ -1252,6 +1252,7 @@ List getIndexSettingsValidationErrors( if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } + validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); if (indexName.isEmpty() || indexName.get().charAt(0) != '.') { // Apply aware replica balance validation only to non system indices int replicaCount = settings.getAsInt( @@ -1306,6 +1307,24 @@ private static List validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } + /** + * Validates {@code index.replication.type} is not set if {@code cluster.restrict.index.replication_type} is set to true. + * + * @param requestSettings settings passed in during index create request + * @param clusterSettings cluster setting + */ + private static Optional validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (requestSettings.hasValue(SETTING_REPLICATION_TYPE) + && clusterSettings.get(IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING)) { + return Optional.of( + "index setting [index.replication.type] is not allowed to be set as [" + + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey() + + "=true]" + ); + } + return Optional.empty(); + } + /** * Validates the settings and mappings for shrinking an index. * diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index 91c996448ea8f..e30e878f1b31a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -166,7 +166,7 @@ public MetadataIndexStateService( /** * Closes one or more indices. - * + *
<p>
                      * Closing indices is a 3 steps process: it first adds a write block to every indices to close, then waits for the operations on shards * to be terminated and finally closes the indices by moving their state to CLOSE. */ @@ -302,7 +302,7 @@ public TimeValue timeout() { /** * Step 1 - Start closing indices by adding a write block - * + *
<p>
                      * This step builds the list of indices to close (the ones explicitly requested that are not in CLOSE state) and adds a unique cluster * block (or reuses an existing one) to every index to close in the cluster state. After the cluster state is published, the shards * should start to reject writing operations and we can proceed with step 2. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 71b86ec853ce4..1093ac09777e7 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -747,7 +747,7 @@ public static Map> findConflictingV2Templates( /** * Return a map of v2 template names to their index patterns for v2 templates that would overlap * with the given template's index patterns. - * + *
<p>
                      * Based on the provided checkPriority and priority parameters this aims to report the overlapping * index templates regardless of the priority (ie. checkPriority == false) or otherwise overlapping * templates with the same priority as the given priority parameter (this is useful when trying to diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 1ab402e1bde4e..e3689d046193c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -54,6 +54,8 @@ import java.util.EnumSet; import java.util.List; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + /** * Contains metadata about registered snapshot repositories * @@ -68,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable implemen * in {@link org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse}. */ public static final String HIDE_GENERATIONS_PARAM = "hide_generations"; + public static final String HIDE_SYSTEM_REPOSITORY_SETTING = "hide_system_repository_setting"; private final List repositories; @@ -288,8 +291,12 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui if (repository.cryptoMetadata() != null) { repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params); } + Settings settings = repository.settings(); + if (SYSTEM_REPOSITORY_SETTING.get(settings) && params.paramAsBoolean(HIDE_SYSTEM_REPOSITORY_SETTING, false)) { + settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey())); + } builder.startObject("settings"); - repository.settings().toXContent(builder, params); + settings.toXContent(builder, params); builder.endObject(); if (params.paramAsBoolean(HIDE_GENERATIONS_PARAM, false) == false) { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 4e49b25eb5789..0c58aabf95207 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -553,7 +553,13 @@ public String toString() { sb.append('}'); } if (!attributes.isEmpty()) { - sb.append(attributes); + sb.append( + attributes.entrySet() + .stream() + .filter(entry -> !entry.getKey().startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)) // filter remote_store attributes + // from logging to reduce noise. + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + ); } return sb.toString(); } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 675d4e09e3be0..561f1b727efe0 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -356,7 +356,7 @@ public DiscoveryNode findByAddress(TransportAddress address) { /** * Returns the version of the node with the oldest version in the cluster that is not a client node - * + *
<p>
                      * If there are no non-client nodes, Version.CURRENT will be returned. * * @return the oldest version in the cluster @@ -367,7 +367,7 @@ public Version getSmallestNonClientNodeVersion() { /** * Returns the version of the node with the youngest version in the cluster that is not a client node. - * + *
<p>
                      * If there are no non-client nodes, Version.CURRENT will be returned. * * @return the youngest version in the cluster @@ -417,16 +417,16 @@ public DiscoveryNode resolveNode(String node) { /** * Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the * user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details. - * + *
<p>
                      * Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node * specification may either add or remove nodes. For instance: - * + *
<p>
                      * - _local, _cluster_manager (_master) and _all respectively add to the subset the local node, the currently-elected cluster_manager, and all the nodes * - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match * - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value * - role:true adds to the subset all nodes with a matching role * - role:false removes from the subset all nodes with a matching role. - * + *
<p>
                      * An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default. */ public String[] resolveNodes(String... nodes) { @@ -813,7 +813,7 @@ public Builder localNodeId(String localNodeId) { * Checks that a node can be safely added to this node collection. * * @return null if all is OK or an error message explaining why a node can not be added. - * + *
<p>
                      * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an * exception */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index b12698c8a320e..d77d44580798a 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -466,12 +466,12 @@ public Builder initializeAsRemoteStoreRestore( } for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { ShardId shardId = new ShardId(index, shardNumber); - if (forceRecoverAllPrimaries == false && indexShardRoutingTableMap.containsKey(shardId) == false) { + if (indexShardRoutingTableMap.containsKey(shardId) == false) { throw new IllegalStateException("IndexShardRoutingTable is not present for shardId: " + shardId); } IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingTableMap.get(shardId); - if (forceRecoverAllPrimaries || indexShardRoutingTable == null || indexShardRoutingTable.primaryShard().unassigned()) { + if (forceRecoverAllPrimaries || indexShardRoutingTable.primaryShard().unassigned()) { // Primary shard to be recovered from remote store. indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo)); // All the replica shards to be recovered from peer recovery. diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index 9cc09c6e4c31c..2dd57431d0375 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -463,7 +463,7 @@ private static Map rankNodes( * OpenSearch, however, we do not have that sort of broadcast-to-all behavior. In order to prevent a node that gets a high score and * then never gets any more requests, we must ensure it eventually returns to a more normal score and can be a candidate for serving * requests. - * + *
<p>
                      * This adjustment takes the "winning" node's statistics and adds the average of those statistics with each non-winning node. Let's say * the winning node had a queue size of 10 and a non-winning node had a queue of 18. The average queue size is (10 + 18) / 2 = 14 so the * non-winning node will have statistics added for a queue size of 14. This is repeated for the response time and service times as well. diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 5cef46689ffc7..b01e074ce40c2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -49,7 +49,7 @@ /** * Represents the recovery source of a shard. Available recovery types are: - * + *
<p>
                      * - {@link EmptyStoreRecoverySource} recovery from an empty store * - {@link ExistingStoreRecoverySource} recovery from an existing store * - {@link PeerRecoverySource} recovery from a primary on another node diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 4f7b935f15f93..5a4352653cc89 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -70,7 +70,7 @@ * {@link RoutingNodes} represents a copy the routing information contained in the {@link ClusterState cluster state}. * It can be either initialized as mutable or immutable (see {@link #RoutingNodes(ClusterState, boolean)}), allowing * or disallowing changes to its elements. - * + *
<p>
                      * The main methods used to update routing entries are: *
<ul>
                        *
                      • {@link #initializeShard} initializes an unassigned shard. @@ -369,7 +369,7 @@ public ShardRouting activePrimary(ShardId shardId) { /** * Returns one active replica shard for the given shard id or null if * no active replica is found. - * + *
<p>
                        * Since replicas could possibly be on nodes with an older version of OpenSearch than * the primary is, this will return replicas on the highest version of OpenSearch when document * replication is enabled. @@ -395,7 +395,7 @@ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { /** * Returns one active replica shard for the given shard id or null if * no active replica is found. - * + *
<p>
                        * Since replicas could possibly be on nodes with a higher version of OpenSearch than * the primary is, this will return replicas on the oldest version of OpenSearch when segment * replication is enabled to allow for replica to read segments from primary. @@ -544,9 +544,9 @@ public Tuple relocateShard( /** * Applies the relevant logic to start an initializing shard. - * + *
<p>
                        * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source. - * + *
<p>
                        * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their * recovery source changes * @@ -605,9 +605,9 @@ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, Ro /** * Applies the relevant logic to handle a cancelled or failed shard. - * + *
<p>
                        * Moves the shard to unassigned or completely removes the shard (if relocation target). - * + *
<p>
                        * - If shard is a primary, this also fails initializing replicas. * - If shard is an active primary, this also promotes an active replica to primary (if such a replica exists). * - If shard is a relocating primary, this also removes the primary relocation target shard. diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java index cfdeed5c227b6..7f5109416494e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java @@ -14,7 +14,7 @@ /** * ShardMovementStrategy defines the order in which shard movement occurs. - * + *

                        * ShardMovementStrategy values or rather their string representation to be used with * {@link BalancedShardsAllocator#SHARD_MOVEMENT_STRATEGY_SETTING} via cluster settings. * diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index de36547b10707..5e748df5eed2d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -153,7 +153,7 @@ public enum Reason { /** * Captures the status of an unsuccessful allocation attempt for the shard, * causing it to remain in the unassigned state. - * + *

                        * Note, ordering of the enum is important, make sure to add new values * at the end and handle version serialization properly. * diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java index 19601483d5607..6fc0e535ef4dc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java @@ -26,7 +26,7 @@ * This {@link AwarenessReplicaBalance} gives total unique values of awareness attributes * It takes in effect only iff cluster.routing.allocation.awareness.attributes and * cluster.routing.allocation.awareness.force.zone.values both are specified. - * + *

                        * This is used in enforcing total copy of shard is a maximum of unique values of awareness attributes * Helps in balancing shards across all awareness attributes and ensuring high availability of data. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index f209e993518c1..ae2d4a0926194 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -36,14 +36,14 @@ public class ConstraintTypes { /** * Constraint to control number of shards of an index allocated on a single * node. - * + *

                        * In current weight function implementation, when a node has significantly * fewer shards than other nodes (e.g. during single new node addition or node * replacement), its weight is much less than other nodes. All shard allocations * at this time tend to land on the new node with skewed weight. This breaks * index level balance in the cluster, by creating all shards of the same index * on one node, often resulting in a hotspot on that node. - * + *

                        * This constraint is breached when balancer attempts to allocate more than * average shards per index per node. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index ae8d92dae6811..7fc78b05880f3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -59,9 +59,9 @@ /** * Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in * {@link IndexMetadata} once the allocation round has completed. - * + *
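A minimal sketch of that breach check, under assumed names rather than the actual ConstraintTypes predicate: the constraint trips once allocating one more shard would put a node above the per-node average for that index.

```java
import java.util.Map;

// Sketch of the breach condition described above; names are illustrative, not the actual
// ConstraintTypes implementation.
final class IndexShardsPerNodeConstraintExample {
    static boolean isBreached(Map<String, Integer> shardsOfIndexPerNode, String nodeId, int totalIndexShards, int nodeCount) {
        double avgShardsPerNode = (double) totalIndexShards / nodeCount;
        // Would taking one more shard of this index push the node above the average?
        return shardsOfIndexPerNode.getOrDefault(nodeId, 0) + 1 > avgShardsPerNode;
    }

    public static void main(String[] args) {
        // 6 shards spread over 3 nodes -> average of 2 per node; a node already holding 2
        // would exceed the average by taking another, so the constraint is breached.
        System.out.println(isBreached(Map.of("node-1", 2), "node-1", 6, 3)); // true
    }
}
```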

                        * Primary terms are updated on primary initialization or when an active primary fails. - * + *

                        * Allocation ids are added for shards that become active and removed for shards that stop being active. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 90eff50fd9b5d..41ace0e7661fe 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -336,7 +336,7 @@ public boolean getPreferPrimaryBalance() { *

                      • *
                      * weight(node, index) = weight_index(node, index) + weight_node(node, index) - * + *
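A rough sketch of that two-term weight; the real WeightFunction in BalancedShardsAllocator normalizes its factors differently, so the constants and names below are assumptions:

```java
// Illustrative sketch of the two-term weight: a per-index balance term plus a per-node balance term.
final class WeightFunctionSketch {
    private final double indexBalance; // weight of the per-index term
    private final double shardBalance; // weight of the per-node term

    WeightFunctionSketch(double indexBalance, double shardBalance) {
        this.indexBalance = indexBalance;
        this.shardBalance = shardBalance;
    }

    double weight(int shardsOfIndexOnNode, double avgShardsOfIndexPerNode, int totalShardsOnNode, double avgShardsPerNode) {
        double weightIndex = indexBalance * (shardsOfIndexOnNode - avgShardsOfIndexPerNode);
        double weightNode = shardBalance * (totalShardsOnNode - avgShardsPerNode);
        return weightIndex + weightNode; // lower weight means a more attractive allocation target
    }
}
```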

                      * package-private for testing */ static class WeightFunction { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 3365b58d92a63..75448520a499c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -530,7 +530,7 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { /** * Move started shards that can not be allocated to a node anymore - * + *

                      * For each shard to be moved this function executes a move operation * to the minimal eligible node with respect to the * weight function. If a shard is moved the shard will be set to diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java index 63d8c656f5049..29e9acca4e6c2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java @@ -63,12 +63,12 @@ public interface ShardsAllocator { * Returns the decision for where a shard should reside in the cluster. If the shard is unassigned, * then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned * state, then the {@link MoveDecision} will be non-null. - * + *

                      * This method is primarily used by the cluster allocation explain API to provide detailed explanations * for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method * may use the results of this method implementation to decide on allocating shards in the routing table * to the cluster. - * + *

                      * If an implementation of this interface does not support explaining decisions for a single shard through * the cluster explain API, then this method should throw a {@code UnsupportedOperationException}. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java index 7fffb0299af85..def0411853643 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java @@ -43,7 +43,7 @@ /** * A command to move shards in some way. - * + *

                      * Commands are registered in {@link NetworkModule}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java index 24c3fd7f34e4a..85f193c8c5580 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -109,7 +109,7 @@ public Decision canRebalance(RoutingAllocation allocation) { * Returns a {@link Decision} whether the given primary shard can be * forcibly allocated on the given node. This method should only be called * for unassigned primary shards where the node has a shard copy on disk. - * + *

                      * Note: all implementations that override this behavior should take into account * the results of {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} * before making a decision on force allocation, because force allocation should only diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 1bd47f111591d..2c7df6b81e676 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -73,23 +73,23 @@ /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially * being allocated to has enough disk space. - * + *

                      * It has three configurable settings, all of which can be changed dynamically: - * + *

                      * cluster.routing.allocation.disk.watermark.low is the low disk * watermark. New shards will not allocated to a node with usage higher than this, * although this watermark may be passed by allocating a shard. It defaults to * 0.85 (85.0%). - * + *

                      * cluster.routing.allocation.disk.watermark.high is the high disk * watermark. If a node has usage higher than this, shards are not allowed to * remain on the node. In addition, if allocating a shard to a node causes the * node to pass this watermark, it will not be allowed. It defaults to * 0.90 (90.0%). - * + *

                      * Both watermark settings are expressed in terms of used disk percentage, or * exact byte values for free space (like "500mb") - * + *

                      * cluster.routing.allocation.disk.threshold_enabled is used to * enable or disable this decider. It defaults to true (enabled). * @@ -119,7 +119,7 @@ public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) /** * Returns the size of all shards that are currently being relocated to * the node, but may not be finished transferring yet. - * + *
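For illustration, the three dynamic settings named above could be expressed as a Settings object; the keys are quoted from this javadoc, while the builder usage is only a sketch (low and high watermarks should use the same style, either percentages as here or absolute free-space values such as "500mb"):

```java
import org.opensearch.common.settings.Settings;

// Sketch of the disk threshold decider settings described in the javadoc above.
final class DiskWatermarkSettingsExample {
    static Settings defaults() {
        return Settings.builder()
            .put("cluster.routing.allocation.disk.threshold_enabled", true)
            .put("cluster.routing.allocation.disk.watermark.low", "85%")
            .put("cluster.routing.allocation.disk.watermark.high", "90%")
            .build();
    }
}
```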

                      * If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards */ public static long sizeOfRelocatingShards( diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 1680f2d8cad1d..c2eccdbc6ed26 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -44,7 +44,7 @@ /** * An allocation decider that prevents multiple instances of the same shard to * be allocated on the same {@code node}. - * + *

                      * The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows to perform a check to prevent * allocation of multiple instances of the same shard on a single {@code host}, * based on host name and host address. Defaults to `false`, meaning that no diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 3a9fdf0ea10cf..26a04de31ce39 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -323,7 +323,7 @@ private Decision allocateShardCopies( * - the initializing shard routing if we want to assign the initializing shard to this node instead * - the started shard routing in case if we want to check if we can relocate to this node. * - the relocating shard routing if we want to relocate to this node now instead. - * + *

                      * This method returns the corresponding initializing shard that would be allocated to this node. */ private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) { diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 007508162ba14..8e94e7cab23d3 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -294,7 +294,7 @@ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { /** * Adds a cluster state listener that is expected to be removed during a short period of time. * If provided, the listener will be notified once a specific time has elapsed. - * + *

                      * NOTE: the listener is not removed on timeout. This is the responsibility of the caller. */ public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) { diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 8da6b1b941f83..afc4e36ec352e 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -29,7 +29,7 @@ /** * This class does throttling on task submission to cluster manager node, it uses throttling key defined in various executors * as key for throttling. Throttling will be performed over task executor's class level, different task types have different executors class. - * + *

                      * Set specific setting to for setting the threshold of throttling of particular task type. * e.g : Set "cluster_manager.throttling.thresholds.put_mapping" to set throttling limit of "put mapping" tasks, * Set it to default value(-1) to disable the throttling for this task type. @@ -117,9 +117,9 @@ public static TimeValue getMaxDelayForRetry() { * * Register task to cluster service with task key, * * override getClusterManagerThrottlingKey method with above task key in task executor. * * Verify that throttled tasks would be retried from data nodes - * + *
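A sketch of supplying that threshold as a setting; the key is taken from the javadoc above, but the exact key shape can vary by version, so treat it as an assumption rather than the authoritative name:

```java
import org.opensearch.common.settings.Settings;

// Sketch: cap queued "put mapping" tasks on the cluster manager; -1 disables throttling
// for the task type. The key is copied from the javadoc above and may differ by version.
final class TaskThrottlingSettingExample {
    static Settings putMappingThreshold(int maxPendingTasks) {
        return Settings.builder()
            .put("cluster_manager.throttling.thresholds.put_mapping", maxPendingTasks)
            .build();
    }
}
```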

                      * Added retry mechanism in TransportClusterManagerNodeAction, so it would be retried for customer generated tasks. - * + *

                      * If tasks are not getting retried then we can register with false flag, so user won't be able to configure threshold limits for it. */ protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) { @@ -236,7 +236,7 @@ public void onBeginSubmit(List tasks) { * It may start throwing throttling exception to older nodes in cluster. * Older version nodes will not be equipped to handle the throttling exception and * this may result in unexpected behavior where internal tasks would start failing without any retries. - * + *

                      * For every task submission request, it will validate if nodes version is greater or equal to 2.5.0 and set the startThrottling flag. * Once the startThrottling flag is set, it will not perform check for next set of tasks. */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java new file mode 100644 index 0000000000000..96683ce720d0b --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.service; + +import org.opensearch.cluster.coordination.PersistedStateStats; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Cluster state related stats. + * + * @opensearch.internal + */ +public class ClusterStateStats implements Writeable, ToXContentObject { + + private AtomicLong updateSuccess = new AtomicLong(0); + private AtomicLong updateTotalTimeInMillis = new AtomicLong(0); + private AtomicLong updateFailed = new AtomicLong(0); + private List persistenceStats = new ArrayList<>(); + + public ClusterStateStats() {} + + public long getUpdateSuccess() { + return updateSuccess.get(); + } + + public long getUpdateTotalTimeInMillis() { + return updateTotalTimeInMillis.get(); + } + + public long getUpdateFailed() { + return updateFailed.get(); + } + + public List getPersistenceStats() { + return persistenceStats; + } + + public void stateUpdated() { + updateSuccess.incrementAndGet(); + } + + public void stateUpdateFailed() { + updateFailed.incrementAndGet(); + } + + public void stateUpdateTook(long stateUpdateTime) { + updateTotalTimeInMillis.addAndGet(stateUpdateTime); + } + + public ClusterStateStats setPersistenceStats(List persistenceStats) { + this.persistenceStats = persistenceStats; + return this; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(updateSuccess.get()); + out.writeVLong(updateTotalTimeInMillis.get()); + out.writeVLong(updateFailed.get()); + out.writeVInt(persistenceStats.size()); + for (PersistedStateStats stats : persistenceStats) { + stats.writeTo(out); + } + } + + public ClusterStateStats(StreamInput in) throws IOException { + this.updateSuccess = new AtomicLong(in.readVLong()); + this.updateTotalTimeInMillis = new AtomicLong(in.readVLong()); + this.updateFailed = new AtomicLong(in.readVLong()); + int persistedStatsSize = in.readVInt(); + this.persistenceStats = new ArrayList<>(); + for (int statsNumber = 0; statsNumber < persistedStatsSize; statsNumber++) { + PersistedStateStats stats = new PersistedStateStats(in); + this.persistenceStats.add(stats); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.CLUSTER_STATE_STATS); + builder.startObject(Fields.OVERALL); + builder.field(Fields.UPDATE_COUNT, getUpdateSuccess()); + 
builder.field(Fields.TOTAL_TIME_IN_MILLIS, getUpdateTotalTimeInMillis()); + builder.field(Fields.FAILED_COUNT, getUpdateFailed()); + builder.endObject(); + for (PersistedStateStats stats : persistenceStats) { + stats.toXContent(builder, params); + } + builder.endObject(); + return builder; + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String CLUSTER_STATE_STATS = "cluster_state_stats"; + static final String OVERALL = "overall"; + static final String UPDATE_COUNT = "update_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 563b69dfd0e2a..07c3f93ae6486 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -112,7 +112,9 @@ public class MasterService extends AbstractLifecycleComponent { static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} */ + /** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} + */ @Deprecated static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; @@ -130,6 +132,7 @@ public class MasterService extends AbstractLifecycleComponent { private volatile Batcher taskBatcher; protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; private final ClusterManagerThrottlingStats throttlingStats; + private final ClusterStateStats stateStats; public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); @@ -147,6 +150,7 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP this::getMinNodeVersion, throttlingStats ); + this.stateStats = new ClusterStateStats(); this.threadPool = threadPool; } @@ -339,7 +343,7 @@ private TimeValue getTimeSince(long startTimeNanos) { return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); } - protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis) { + protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeNanos) { final PlainActionFuture fut = new PlainActionFuture() { @Override protected boolean blockingAllowed() { @@ -352,8 +356,12 @@ protected boolean blockingAllowed() { try { FutureUtils.get(fut); onPublicationSuccess(clusterChangedEvent, taskOutputs); + final long durationMillis = getTimeSince(startTimeNanos).millis(); + stateStats.stateUpdateTook(durationMillis); + stateStats.stateUpdated(); } catch (Exception e) { - onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeMillis, e); + stateStats.stateUpdateFailed(); + onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); } } @@ -464,7 +472,6 @@ public Builder incrementVersion(ClusterState clusterState) { * @param source the source of the cluster state update task * @param updateTask the full context for the cluster state update * task - * */ public & 
ClusterStateTaskListener> void submitStateUpdateTask( String source, @@ -490,7 +497,6 @@ public & Cluster * @param listener callback after the cluster state update task * completes * @param the type of the cluster state update task state - * */ public void submitStateUpdateTask( String source, @@ -947,7 +953,7 @@ void onNoLongerClusterManager() { /** * Functionality for register task key to cluster manager node. * - * @param taskKey - task key of task + * @param taskKey - task key of task * @param throttlingEnabled - throttling is enabled for task or not i.e does data node perform retries on it or not * @return throttling task key which needs to be passed while submitting task to cluster manager */ @@ -966,7 +972,6 @@ public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(Stri * that share the same executor will be executed * batches on this executor * @param the type of the cluster state update task state - * */ public void submitStateUpdateTasks( final String source, @@ -996,4 +1001,8 @@ public void submitStateUpdateTasks( } } + public ClusterStateStats getClusterStateStats() { + return stateStats; + } + } diff --git a/server/src/main/java/org/opensearch/common/Randomness.java b/server/src/main/java/org/opensearch/common/Randomness.java index 2c60e848b9db9..221bc95c41f31 100644 --- a/server/src/main/java/org/opensearch/common/Randomness.java +++ b/server/src/main/java/org/opensearch/common/Randomness.java @@ -127,7 +127,7 @@ public static Random get() { /** * Provides a secure source of randomness. - * + *
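The new ClusterStateStats counters are driven from the publish path as shown in the MasterService hunk above; a small usage sketch follows (the wrapper method itself is hypothetical, the stats methods are the ones added in this diff):

```java
import org.opensearch.cluster.service.ClusterStateStats;

// Usage sketch mirroring the MasterService change: record elapsed publish time and a success
// on completion, or a failure on exception. recordPublication is a hypothetical wrapper.
final class ClusterStateStatsUsageExample {
    static void recordPublication(ClusterStateStats stats, long startNanos, Runnable publish) {
        try {
            publish.run();
            stats.stateUpdateTook((System.nanoTime() - startNanos) / 1_000_000L); // duration in millis
            stats.stateUpdated();
        } catch (RuntimeException e) {
            stats.stateUpdateFailed();
            throw e;
        }
    }
}
```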

                      * This acts exactly similar to {@link #get()}, but returning a new {@link SecureRandom}. */ public static SecureRandom createSecure() { diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java index e73a9f5cd0bc9..97f304d776f5c 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java @@ -10,13 +10,10 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.blobstore.stream.read.ReadContext; -import org.opensearch.common.blobstore.stream.read.listener.ReadContextListener; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.core.action.ActionListener; -import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.Path; /** * An extension of {@link BlobContainer} that adds {@link AsyncMultiStreamBlobContainer#asyncBlobUpload} to allow @@ -45,19 +42,6 @@ public interface AsyncMultiStreamBlobContainer extends BlobContainer { @ExperimentalApi void readBlobAsync(String blobName, ActionListener listener); - /** - * Asynchronously downloads the blob to the specified location using an executor from the thread pool. - * @param blobName The name of the blob for which needs to be downloaded. - * @param fileLocation The path on local disk where the blob needs to be downloaded. - * @param threadPool The threadpool instance which will provide the executor for performing a multipart download. - * @param completionListener Listener which will be notified when the download is complete. - */ - @ExperimentalApi - default void asyncBlobDownload(String blobName, Path fileLocation, ThreadPool threadPool, ActionListener completionListener) { - ReadContextListener readContextListener = new ReadContextListener(blobName, fileLocation, threadPool, completionListener); - readBlobAsync(blobName, readContextListener); - } - /* * Wether underlying blobContainer can verify integrity of data after transfer. 
If true and if expected * checksum is provided in WriteContext, then the checksum of transferred data is compared with expected checksum diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java index c64dc6b9e3ae4..82bc7a0baed50 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java @@ -144,8 +144,10 @@ public long getBlobSize() { } @Override - public List getPartStreams() { - return super.getPartStreams().stream().map(this::decryptInputStreamContainer).collect(Collectors.toList()); + public List getPartStreams() { + return super.getPartStreams().stream() + .map(supplier -> (StreamPartCreator) () -> supplier.get().thenApply(this::decryptInputStreamContainer)) + .collect(Collectors.toUnmodifiableList()); } /** diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 3cdb1ce30b68d..2e25a532b5abf 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -93,10 +93,10 @@ public interface BlobContainer { /** * Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}. - * + *

                      * Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively * request more data than they need right now and to re-use this stream for future needs if possible. - * + *

                      * Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may * depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by * bounding the length of the data requested. @@ -131,7 +131,7 @@ default long readBlobPreferredLength() { /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. - * + *
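A short usage sketch of the hint described above, assuming a hypothetical helper that widens a ranged read to at least the container's preferred length so the stream can be reused for later reads:

```java
import org.opensearch.common.blobstore.BlobContainer;

import java.io.IOException;
import java.io.InputStream;

// Sketch: request at least readBlobPreferredLength() bytes instead of many small readBlob calls.
// openForRange is a hypothetical helper, not part of the BlobContainer API.
final class PreferredLengthReadExample {
    static InputStream openForRange(BlobContainer container, String blobName, long offset, long wanted) throws IOException {
        long length = Math.max(wanted, container.readBlobPreferredLength());
        return container.readBlob(blobName, offset, length);
    }
}
```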

                      * This method assumes the container does not already contain a blob of the same blobName. If a blob by the * same name already exists, the operation will fail and an {@link IOException} will be thrown. * diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index ab40b1e2a082e..0f6646d37f950 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -31,6 +31,8 @@ package org.opensearch.common.blobstore; +import org.opensearch.cluster.metadata.RepositoryMetadata; + import java.io.Closeable; import java.util.Collections; import java.util.Map; @@ -47,10 +49,46 @@ public interface BlobStore extends Closeable { */ BlobContainer blobContainer(BlobPath path); + /** + * Returns statistics on the count of operations that have been performed on this blob store + */ /** * Returns statistics on the count of operations that have been performed on this blob store */ default Map stats() { return Collections.emptyMap(); } + + /** + * Returns details statistics of operations that have been performed on this blob store + */ + default Map> extendedStats() { + return Collections.emptyMap(); + } + + /** + * Reload the blob store inplace + */ + default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Metrics for BlobStore interactions + */ + enum Metric { + REQUEST_SUCCESS("request_success_total"), + REQUEST_FAILURE("request_failures_total"), + REQUEST_LATENCY("request_time_in_millis"), + RETRY_COUNT("request_retry_count_total"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + } + } diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java index 4d2d69e473438..a18ca8b9d5c39 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -75,6 +75,16 @@ public Map stats() { return blobStore.stats(); } + /** + * Retrieves extended statistics about the BlobStore. Delegates the call to the underlying BlobStore's extendedStats() method. + * + * @return A map containing extended statistics about the BlobStore. + */ + @Override + public Map> extendedStats() { + return blobStore.extendedStats(); + } + /** * Closes the EncryptedBlobStore by decrementing the reference count of the CryptoManager and closing the * underlying BlobStore. This ensures proper cleanup of resources. diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java index 394855671688a..b6644ffd16bab 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java @@ -69,7 +69,7 @@ /** * A file system based implementation of {@link org.opensearch.common.blobstore.BlobContainer}. * All blobs in the container are stored on a file system, the location of which is specified by the {@link BlobPath}. - * + *

                      * Note that the methods in this implementation of {@link org.opensearch.common.blobstore.BlobContainer} may * additionally throw a {@link java.lang.SecurityException} if the configured {@link java.lang.SecurityManager} * does not permit read and/or write access to the underlying files. @@ -258,7 +258,7 @@ public static String tempBlobName(final String blobName) { /** * Returns true if the blob is a leftover temporary blob. - * + *

                      * The temporary blobs might be left after failed atomic write operation. */ public static boolean isTempBlobName(final String blobName) { diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java index 2c305fb03c475..4bdce11ff4f9a 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java @@ -12,6 +12,8 @@ import org.opensearch.common.io.InputStreamContainer; import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; /** * ReadContext is used to encapsulate all data needed by BlobContainer#readBlobAsync @@ -19,18 +21,18 @@ @ExperimentalApi public class ReadContext { private final long blobSize; - private final List partStreams; + private final List asyncPartStreams; private final String blobChecksum; - public ReadContext(long blobSize, List partStreams, String blobChecksum) { + public ReadContext(long blobSize, List asyncPartStreams, String blobChecksum) { this.blobSize = blobSize; - this.partStreams = partStreams; + this.asyncPartStreams = asyncPartStreams; this.blobChecksum = blobChecksum; } public ReadContext(ReadContext readContext) { this.blobSize = readContext.blobSize; - this.partStreams = readContext.partStreams; + this.asyncPartStreams = readContext.asyncPartStreams; this.blobChecksum = readContext.blobChecksum; } @@ -39,14 +41,30 @@ public String getBlobChecksum() { } public int getNumberOfParts() { - return partStreams.size(); + return asyncPartStreams.size(); } public long getBlobSize() { return blobSize; } - public List getPartStreams() { - return partStreams; + public List getPartStreams() { + return asyncPartStreams; + } + + /** + * Functional interface defining an instance that can create an async action + * to create a part of an object represented as an InputStreamContainer. + */ + @FunctionalInterface + public interface StreamPartCreator extends Supplier> { + /** + * Kicks off a async process to start streaming. + * + * @return When the returned future is completed, streaming has + * just begun. Clients must fully consume the resulting stream. + */ + @Override + CompletableFuture get(); } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java deleted file mode 100644 index aadd6e2ab304e..0000000000000 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListener.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.blobstore.stream.read.listener; - -import org.opensearch.common.annotation.InternalApi; -import org.opensearch.core.action.ActionListener; - -import java.util.concurrent.atomic.AtomicInteger; - -/** - * FileCompletionListener listens for completion of fetch on all the streams for a file, where - * individual streams are handled using {@link FilePartWriter}. The {@link FilePartWriter}(s) - * hold a reference to the file completion listener to be notified. 
- */ -@InternalApi -class FileCompletionListener implements ActionListener { - - private final int numberOfParts; - private final String fileName; - private final AtomicInteger completedPartsCount; - private final ActionListener completionListener; - - public FileCompletionListener(int numberOfParts, String fileName, ActionListener completionListener) { - this.completedPartsCount = new AtomicInteger(); - this.numberOfParts = numberOfParts; - this.fileName = fileName; - this.completionListener = completionListener; - } - - @Override - public void onResponse(Integer unused) { - if (completedPartsCount.incrementAndGet() == numberOfParts) { - completionListener.onResponse(fileName); - } - } - - @Override - public void onFailure(Exception e) { - completionListener.onFailure(e); - } -} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java index 84fd7ed9ffebf..1a403200249cd 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java @@ -8,83 +8,37 @@ package org.opensearch.common.blobstore.stream.read.listener; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.io.Channels; import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.core.action.ActionListener; import java.io.IOException; import java.io.InputStream; import java.nio.channels.FileChannel; -import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.UnaryOperator; /** * FilePartWriter transfers the provided stream into the specified file path using a {@link FileChannel} - * instance. It performs offset based writes to the file and notifies the {@link FileCompletionListener} on completion. + * instance. */ @InternalApi -class FilePartWriter implements Runnable { - - private final int partNumber; - private final InputStreamContainer blobPartStreamContainer; - private final Path fileLocation; - private final AtomicBoolean anyPartStreamFailed; - private final ActionListener fileCompletionListener; - private static final Logger logger = LogManager.getLogger(FilePartWriter.class); - +class FilePartWriter { // 8 MB buffer for transfer private static final int BUFFER_SIZE = 8 * 1024 * 2024; - public FilePartWriter( - int partNumber, - InputStreamContainer blobPartStreamContainer, - Path fileLocation, - AtomicBoolean anyPartStreamFailed, - ActionListener fileCompletionListener - ) { - this.partNumber = partNumber; - this.blobPartStreamContainer = blobPartStreamContainer; - this.fileLocation = fileLocation; - this.anyPartStreamFailed = anyPartStreamFailed; - this.fileCompletionListener = fileCompletionListener; - } - - @Override - public void run() { - // Ensures no writes to the file if any stream fails. 
- if (anyPartStreamFailed.get() == false) { - try (FileChannel outputFileChannel = FileChannel.open(fileLocation, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) { - try (InputStream inputStream = blobPartStreamContainer.getInputStream()) { - long streamOffset = blobPartStreamContainer.getOffset(); - final byte[] buffer = new byte[BUFFER_SIZE]; - int bytesRead; - while ((bytesRead = inputStream.read(buffer)) != -1) { - Channels.writeToChannel(buffer, 0, bytesRead, outputFileChannel, streamOffset); - streamOffset += bytesRead; - } + public static void write(Path fileLocation, InputStreamContainer stream, UnaryOperator rateLimiter) throws IOException { + try (FileChannel outputFileChannel = FileChannel.open(fileLocation, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) { + try (InputStream inputStream = rateLimiter.apply(stream.getInputStream())) { + long streamOffset = stream.getOffset(); + final byte[] buffer = new byte[BUFFER_SIZE]; + int bytesRead; + while ((bytesRead = inputStream.read(buffer)) != -1) { + Channels.writeToChannel(buffer, 0, bytesRead, outputFileChannel, streamOffset); + streamOffset += bytesRead; } - } catch (IOException e) { - processFailure(e); - return; } - fileCompletionListener.onResponse(partNumber); - } - } - - void processFailure(Exception e) { - try { - Files.deleteIfExists(fileLocation); - } catch (IOException ex) { - // Die silently - logger.info("Failed to delete file {} on stream failure: {}", fileLocation, ex); - } - if (anyPartStreamFailed.getAndSet(true) == false) { - fileCompletionListener.onFailure(e); } } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java index 4338bddb3fbe7..c77f2384ace0d 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java @@ -10,56 +10,190 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.IOUtils; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.UUIDs; import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.UnaryOperator; /** * ReadContextListener orchestrates the async file fetch from the {@link org.opensearch.common.blobstore.BlobContainer} - * using a {@link ReadContext} callback. On response, it spawns off the download using multiple streams which are - * spread across a {@link ThreadPool} executor. + * using a {@link ReadContext} callback. On response, it spawns off the download using multiple streams. 
*/ @InternalApi public class ReadContextListener implements ActionListener { - - private final String fileName; + private static final Logger logger = LogManager.getLogger(ReadContextListener.class); + private static final String DOWNLOAD_PREFIX = "download."; + private final String blobName; private final Path fileLocation; - private final ThreadPool threadPool; + private final String tmpFileName; + private final Path tmpFileLocation; private final ActionListener completionListener; - private static final Logger logger = LogManager.getLogger(ReadContextListener.class); + private final ThreadPool threadPool; + private final UnaryOperator rateLimiter; + private final int maxConcurrentStreams; - public ReadContextListener(String fileName, Path fileLocation, ThreadPool threadPool, ActionListener completionListener) { - this.fileName = fileName; + public ReadContextListener( + String blobName, + Path fileLocation, + ActionListener completionListener, + ThreadPool threadPool, + UnaryOperator rateLimiter, + int maxConcurrentStreams + ) { + this.blobName = blobName; this.fileLocation = fileLocation; - this.threadPool = threadPool; this.completionListener = completionListener; + this.threadPool = threadPool; + this.rateLimiter = rateLimiter; + this.maxConcurrentStreams = maxConcurrentStreams; + this.tmpFileName = DOWNLOAD_PREFIX + UUIDs.randomBase64UUID() + "." + blobName; + this.tmpFileLocation = fileLocation.getParent().resolve(tmpFileName); } @Override public void onResponse(ReadContext readContext) { - logger.trace("Streams received for blob {}", fileName); + logger.debug("Received {} parts for blob {}", readContext.getNumberOfParts(), blobName); final int numParts = readContext.getNumberOfParts(); - final AtomicBoolean anyPartStreamFailed = new AtomicBoolean(); - FileCompletionListener fileCompletionListener = new FileCompletionListener(numParts, fileName, completionListener); - - for (int partNumber = 0; partNumber < numParts; partNumber++) { - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - readContext.getPartStreams().get(partNumber), - fileLocation, - anyPartStreamFailed, - fileCompletionListener - ); - threadPool.executor(ThreadPool.Names.GENERIC).submit(filePartWriter); + final AtomicBoolean anyPartStreamFailed = new AtomicBoolean(false); + final GroupedActionListener groupedListener = new GroupedActionListener<>(getFileCompletionListener(), numParts); + final Queue queue = new ConcurrentLinkedQueue<>(readContext.getPartStreams()); + final StreamPartProcessor processor = new StreamPartProcessor( + queue, + anyPartStreamFailed, + tmpFileLocation, + groupedListener, + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY), + rateLimiter + ); + for (int i = 0; i < Math.min(maxConcurrentStreams, queue.size()); i++) { + processor.process(queue.poll()); } } + @SuppressForbidden(reason = "need to fsync once all parts received") + private ActionListener> getFileCompletionListener() { + return ActionListener.wrap(response -> { + logger.trace("renaming temp file [{}] to [{}]", tmpFileLocation, fileLocation); + try { + IOUtils.fsync(tmpFileLocation, false); + Files.move(tmpFileLocation, fileLocation, StandardCopyOption.ATOMIC_MOVE); + // sync parent dir metadata + IOUtils.fsync(fileLocation.getParent(), true); + completionListener.onResponse(blobName); + } catch (IOException e) { + logger.error("Unable to rename temp file + " + tmpFileLocation, e); + completionListener.onFailure(e); + } + }, e -> { + try { + Files.deleteIfExists(tmpFileLocation); + } catch (IOException ex) { + 
logger.warn("Unable to clean temp file {}", tmpFileLocation); + } + completionListener.onFailure(e); + }); + } + + /* + * For Tests + */ + Path getTmpFileLocation() { + return tmpFileLocation; + } + @Override public void onFailure(Exception e) { completionListener.onFailure(e); } + + private static class StreamPartProcessor { + private static final RuntimeException CANCELED_PART_EXCEPTION = new RuntimeException( + "Canceled part download due to previous failure" + ); + private final Queue queue; + private final AtomicBoolean anyPartStreamFailed; + private final Path fileLocation; + private final GroupedActionListener completionListener; + private final Executor executor; + private final UnaryOperator rateLimiter; + + private StreamPartProcessor( + Queue queue, + AtomicBoolean anyPartStreamFailed, + Path fileLocation, + GroupedActionListener completionListener, + Executor executor, + UnaryOperator rateLimiter + ) { + this.queue = queue; + this.anyPartStreamFailed = anyPartStreamFailed; + this.fileLocation = fileLocation; + this.completionListener = completionListener; + this.executor = executor; + this.rateLimiter = rateLimiter; + } + + private void process(ReadContext.StreamPartCreator supplier) { + if (supplier == null) { + return; + } + supplier.get().whenCompleteAsync((blobPartStreamContainer, throwable) -> { + if (throwable != null) { + processFailure(throwable instanceof Exception ? (Exception) throwable : new RuntimeException(throwable)); + } else if (anyPartStreamFailed.get()) { + processFailure(CANCELED_PART_EXCEPTION); + } else { + try { + FilePartWriter.write(fileLocation, blobPartStreamContainer, rateLimiter); + completionListener.onResponse(fileLocation.toString()); + + // Upon successfully completing a file part, pull another + // file part off the queue to trigger asynchronous processing + process(queue.poll()); + } catch (Exception e) { + processFailure(e); + } + } + }, executor); + } + + private void processFailure(Exception e) { + if (anyPartStreamFailed.getAndSet(true) == false) { + completionListener.onFailure(e); + + // Drain the queue of pending part downloads. These can be discarded + // since they haven't started any work yet, but the listener must be + // notified for each part. 
+ Object item = queue.poll(); + while (item != null) { + completionListener.onFailure(CANCELED_PART_EXCEPTION); + item = queue.poll(); + } + } else { + completionListener.onFailure(e); + } + try { + Files.deleteIfExists(fileLocation); + } catch (IOException ex) { + // Die silently + logger.info("Failed to delete file {} on stream failure: {}", fileLocation, ex); + } + } + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java index b8c0b52f93a3c..3f341c878c3c7 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java @@ -15,5 +15,6 @@ */ public enum WritePriority { NORMAL, - HIGH + HIGH, + URGENT } diff --git a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java index de4e6ad433c55..c9b498c3ec6fa 100644 --- a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -204,7 +204,7 @@ private long limit(long bytes, String label, double overheadConstant, long memor /** * Add an exact number of bytes, not checking for tripping the * circuit breaker. This bypasses the overheadConstant multiplication. - * + *
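The ReadContextListener above seeds up to maxConcurrentStreams part downloads from a queue and has each completed part pull the next one off the queue. A generic sketch of that bounded-concurrency pattern, with the real class's error handling and temp-file cleanup omitted:

```java
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.function.Supplier;

// Generic sketch of the bounded-concurrency pattern used by ReadContextListener/StreamPartProcessor:
// start at most maxConcurrent tasks, and let each completion trigger the next queued item.
final class BoundedQueueProcessor<T> {
    private final Queue<Supplier<CompletableFuture<T>>> queue = new ConcurrentLinkedQueue<>();
    private final Executor executor;

    BoundedQueueProcessor(Iterable<Supplier<CompletableFuture<T>>> parts, Executor executor) {
        parts.forEach(queue::add);
        this.executor = executor;
    }

    void start(int maxConcurrent) {
        for (int i = 0; i < maxConcurrent; i++) {
            process(queue.poll());
        }
    }

    private void process(Supplier<CompletableFuture<T>> part) {
        if (part == null) {
            return;
        }
        part.get().whenCompleteAsync((result, error) -> {
            // ... handle the completed (or failed) part here ...
            process(queue.poll()); // then pull the next pending part, keeping concurrency bounded
        }, executor);
    }
}
```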

                      * Also does not check with the parent breaker to see if the parent limit * has been exceeded. * diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index 0ebef1556424b..0b2b608b55df0 100644 --- a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -403,7 +403,7 @@ private V get(K key, long now, Consumer> onExpiration) { * If the specified key is not already associated with a value (or is mapped to null), attempts to compute its * value using the given mapping function and enters it into this map unless null. The load method for a given key * will be invoked at most once. - * + *

                      * Use of different {@link CacheLoader} implementations on the same key concurrently may result in only the first * loader function being called and the second will be returned the result provided by the first including any exceptions * thrown during the execution of the first. diff --git a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java index 5ce77cdc75fe5..de4304f0e1fba 100644 --- a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java +++ b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java @@ -49,15 +49,15 @@ /** * An immutable map whose writes result in a new copy of the map to be created. - * + *
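A usage sketch of that at-most-once loading contract; the CacheBuilder construction below is an assumption about the helper API rather than part of this diff:

```java
import org.opensearch.common.cache.Cache;
import org.opensearch.common.cache.CacheBuilder;

import java.util.concurrent.ExecutionException;

// Sketch: concurrent callers for the same key share a single load; the loader runs at most once per key.
final class CacheLoadOnceExample {
    static String describe(Cache<String, String> cache, String key) throws ExecutionException {
        return cache.computeIfAbsent(key, k -> "value-for-" + k);
    }

    public static void main(String[] args) throws ExecutionException {
        // CacheBuilder usage is assumed here for construction.
        Cache<String, String> cache = CacheBuilder.<String, String>builder().build();
        System.out.println(describe(cache, "shard-0"));
    }
}
```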

                      * This is essentially a hash array mapped trie: inner nodes use a bitmap in * order to map hashes to slots by counting ones. In case of a collision (two * values having the same 32-bits hash), a leaf node is created which stores * and searches for values sequentially. - * + *

                      * Reads and writes both perform in logarithmic time. Null keys and values are * not supported. - * + *

                      * This structure might need to perform several object creations per write so * it is better suited for work-loads that are not too write-intensive. * @@ -250,7 +250,7 @@ public static T[] insertElement(final T[] array, final T element, final int * and use a bitmap in order to associate hashes to them. For example, if * an inner node contains 5 values, then 5 bits will be set in the bitmap * and the ordinal of the bit set in this bit map will be the slot number. - * + *

                      * As a consequence, the number of slots in an inner node is equal to the * number of one bits in the bitmap. * diff --git a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java index 0334d367ffdbc..1622457ba27cc 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java @@ -221,11 +221,11 @@ void validateLinearRing(CoordinateNode coordinates, boolean coerce) { @Override CoordinateNode validate(CoordinateNode coordinates, boolean coerce) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ if (coordinates.children == null || coordinates.children.isEmpty()) { throw new OpenSearchParseException( diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java index 393c238cb3b2f..8c566c4191e4f 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java @@ -665,7 +665,7 @@ public static GeoPoint parseFromString(String val) { /** * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". - * + *
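A small sketch of the bitmap-to-slot mapping described above, using illustrative names rather than CopyOnWriteHashMap's internals: the slot index is simply the count of one bits below the bucket's bit.

```java
// Sketch of hash-array-mapped-trie slot lookup: a 5-bit chunk of the hash selects a bucket,
// and the slot is the number of one bits below that bucket's bit in the node's bitmap.
final class HashArrayMappedTrieSlotExample {
    static final int HASH_BITS = 5;                 // 2^5 = 32 buckets per level
    static final int MASK = (1 << HASH_BITS) - 1;

    /** Returns the slot for the given hash at the given level, or -1 if the bucket is empty. */
    static int slot(int bitmap, int hash, int level) {
        int bucket = (hash >>> (level * HASH_BITS)) & MASK;
        int bit = 1 << bucket;
        if ((bitmap & bit) == 0) {
            return -1; // no entry for this bucket at this level
        }
        return Integer.bitCount(bitmap & (bit - 1)); // ones below the bucket's bit = slot number
    }

    public static void main(String[] args) {
        int bitmap = 0b10110; // buckets 1, 2 and 4 occupied -> 3 slots in this inner node
        System.out.println(slot(bitmap, 4, 0)); // bucket 4 -> slot 2
    }
}
```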

                      * The precision is expressed as a number between 1 and 12 and indicates the length of geohash * used to represent geo points. * @@ -696,7 +696,7 @@ public static int parsePrecision(XContentParser parser) throws IOException, Open /** * Checks that the precision is within range supported by opensearch - between 1 and 12 - * + *

                      * Returns the precision value if it is in the range and throws an IllegalArgumentException if it * is outside the range. */ diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java index 56146fc8197be..93c7f4b93679a 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java +++ b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java @@ -63,7 +63,7 @@ public interface GeometryFormat { /** * Serializes the geometry into a standard Java object. - * + *
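A minimal sketch of that range check; the real helper lives in GeoUtils and additionally accepts distance strings such as "1km" through the XContentParser overload:

```java
// Sketch of the 1..12 geohash precision validation described above.
final class GeohashPrecisionCheckExample {
    static int checkPrecisionRange(int precision) {
        if (precision < 1 || precision > 12) {
            throw new IllegalArgumentException(
                "Invalid geohash precision of " + precision + ". Must be between 1 and 12."
            );
        }
        return precision;
    }
}
```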

                      * For example, the GeoJson format returns the geometry as a map, while WKT returns a string. */ Object toXContentAsObject(ParsedFormat geometry); diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java index b436787220eb0..9e118ab2de3a5 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java @@ -177,11 +177,11 @@ public PolygonBuilder close() { } private static void validateLinearRing(LineStringBuilder lineString) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ List points = lineString.coordinates; if (points.size() < 4) { diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java index 37d42ce600b6d..8d473ae6721d2 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java @@ -52,7 +52,7 @@ /** * Parses shape geometry represented in geojson - * + *

                      * complies with geojson specification: https://tools.ietf.org/html/rfc7946 * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java index d99d1daf46a2a..b199da0f3691a 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java @@ -58,7 +58,7 @@ /** * Parses shape geometry represented in WKT format - * + *

                      * complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard * located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html * diff --git a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java index 8ba0bd7ee1be4..e481ffd460798 100644 --- a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java +++ b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java @@ -93,7 +93,7 @@ protected static long fmix(long k) { /** * Compute the hash of the MurmurHash3_x64_128 hashing function. - * + *

                      * Note, this hashing function might be used to persist hashes, so if the way hashes are computed * changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField). */ diff --git a/server/src/main/java/org/opensearch/common/inject/Initializer.java b/server/src/main/java/org/opensearch/common/inject/Initializer.java index e806eba6df707..b88b01c03c018 100644 --- a/server/src/main/java/org/opensearch/common/inject/Initializer.java +++ b/server/src/main/java/org/opensearch/common/inject/Initializer.java @@ -68,9 +68,8 @@ class Initializer { /** * Registers an instance for member injection when that step is performed. * - * @param instance an instance that optionally has members to be injected (each annotated with - * @param source the source location that this injection was requested - * @Inject). + * @param instance an instance that optionally has members to be injected (each annotated with {@code @Inject}). + * @param source the source location that this injection was requested */ public Initializable requestInjection(InjectorImpl injector, T instance, Object source, Set injectionPoints) { Objects.requireNonNull(source); diff --git a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java index fd9ed6e5b18fe..f1e5f5f22d527 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java +++ b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java @@ -40,7 +40,7 @@ public static void registerStreamables() { * Registers writers by class type */ private static void registerWriters() { - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerWriter(GeoPoint.class, (o, v) -> { o.writeByte((byte) 22); ((GeoPoint) v).writeTo(o); @@ -53,7 +53,7 @@ private static void registerWriters() { * NOTE: see {@code StreamOutput#WRITERS} for all registered ordinals */ private static void registerReaders() { - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerReader(Byte.valueOf((byte) 22), GeoPoint::new); } } diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java b/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java index 12d48a0b362ce..bf25e5b1b3923 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java @@ -125,6 +125,11 @@ public String pattern() { return pattern; } + @Override + public String printPattern() { + throw new UnsupportedOperationException("JodaDateFormatter does not have a print pattern"); + } + @Override public Locale locale() { return printer.getLocale(); diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java index 0de6dec1c25bd..ae38e9a6a8073 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java @@ -46,7 +46,7 @@ /** * A parser for date/time formatted text with optional date math. - * + *
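The MurmurHash3 note above is about hash stability for persisted values (BloomFilter, MurmurHashField). A usage sketch, assuming the `hash128(byte[] key, int offset, int length, long seed, Hash128 hash)` entry point this class has historically exposed:

```java
import org.opensearch.common.hash.MurmurHash3;

import java.nio.charset.StandardCharsets;

final class HashExample {
    static long[] hash(String value) {
        byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
        // Seed 0 and a fresh output holder; h1/h2 are the two 64-bit halves of the 128-bit hash.
        MurmurHash3.Hash128 out = MurmurHash3.hash128(bytes, 0, bytes.length, 0, new MurmurHash3.Hash128());
        return new long[] { out.h1, out.h2 };
    }
}
```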

                      * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index e259d5d9e3e33..ed324e4e62d8f 100644 --- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -47,7 +47,7 @@ * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] - * + *

                      * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from * LoggingEvent into a multiline string * diff --git a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java index 0ffd633e5a967..17b75ab22f3ed 100644 --- a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java +++ b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java @@ -55,7 +55,7 @@ * mappings as segments that were not known before are added and prevents the * structure from growing indefinitely by registering close listeners on these * segments so that at any time it only tracks live segments. - * + *

                      * NOTE: This is heavy. Avoid using this class unless absolutely required. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index 8870e26c373e9..821d48fccf48c 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -131,7 +131,7 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); - private final List transportIntercetors = new ArrayList<>(); + private final List transportInterceptors = new ArrayList<>(); /** * Creates a network module that custom networking classes can be plugged into. @@ -149,9 +149,13 @@ public NetworkModule( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - Tracer tracer + Tracer tracer, + List transportInterceptors ) { this.settings = settings; + if (transportInterceptors != null) { + transportInterceptors.forEach(this::registerTransportInterceptor); + } for (NetworkPlugin plugin : plugins) { Map> httpTransportFactory = plugin.getHttpTransports( settings, @@ -174,16 +178,17 @@ public NetworkModule( pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, - networkService + networkService, + tracer ); for (Map.Entry> entry : transportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); } - List transportInterceptors = plugin.getTransportInterceptors( + List pluginTransportInterceptors = plugin.getTransportInterceptors( namedWriteableRegistry, threadPool.getThreadContext() ); - for (TransportInterceptor interceptor : transportInterceptors) { + for (TransportInterceptor interceptor : pluginTransportInterceptors) { registerTransportInterceptor(interceptor); } } @@ -263,7 +268,7 @@ public Supplier getTransportSupplier() { * Registers a new {@link TransportInterceptor} */ private void registerTransportInterceptor(TransportInterceptor interceptor) { - this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + this.transportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); } /** @@ -271,7 +276,7 @@ private void registerTransportInterceptor(TransportInterceptor interceptor) { * @see #registerTransportInterceptor(TransportInterceptor) */ public TransportInterceptor getTransportInterceptor() { - return new CompositeTransportInterceptor(this.transportIntercetors); + return new CompositeTransportInterceptor(this.transportInterceptors); } static final class CompositeTransportInterceptor implements TransportInterceptor { diff --git a/server/src/main/java/org/opensearch/common/rounding/Rounding.java b/server/src/main/java/org/opensearch/common/rounding/Rounding.java index 857031bc783f4..41e808b64f7d9 100644 --- a/server/src/main/java/org/opensearch/common/rounding/Rounding.java +++ b/server/src/main/java/org/opensearch/common/rounding/Rounding.java @@ -45,7 +45,7 @@ /** * A strategy for rounding long values. - * + *
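The NetworkModule change above adds a constructor parameter for core `TransportInterceptor`s on top of the ones contributed through `NetworkPlugin#getTransportInterceptors`. A minimal sketch of the plugin path, relying only on the interface's default methods (so nothing is actually intercepted); class and package names of the plugin are illustrative:

```java
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.plugins.NetworkPlugin;
import org.opensearch.plugins.Plugin;
import org.opensearch.transport.TransportInterceptor;

import java.util.List;

public class MyNetworkPlugin extends Plugin implements NetworkPlugin {
    @Override
    public List<TransportInterceptor> getTransportInterceptors(
        NamedWriteableRegistry namedWriteableRegistry,
        ThreadContext threadContext
    ) {
        // All TransportInterceptor methods have defaults, so an empty implementation is a valid no-op.
        return List.of(new TransportInterceptor() {
        });
    }
}
```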

                      * Use the java based Rounding class where applicable * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index 8b7a2a82e5cb1..117ed66fcb451 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -254,7 +254,7 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu /** * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings are specified in the argument are returned. - * + *

                      * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer(Consumer consumer, List> settings) { @@ -265,7 +265,7 @@ public synchronized void addSettingsUpdateConsumer(Consumer consumer, * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings are specified in the argument are returned. The validator is run across all specified settings before the settings are * applied. - * + *

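The `addSettingsUpdateConsumer` methods described above register callbacks that fire when a dynamic setting changes (and add empty consumers so updates still get logged). A usage sketch with a hypothetical dynamic setting; the setting would need to be registered with the same `ClusterSettings` instance:

```java
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Setting.Property;

final class MySettingListener {
    // Hypothetical dynamic setting, used only for illustration.
    static final Setting<Integer> MY_LIMIT = Setting.intSetting("my_plugin.limit", 10, Property.Dynamic, Property.NodeScope);

    static void register(ClusterSettings clusterSettings) {
        // Called with the new value whenever "my_plugin.limit" is updated via the cluster settings API.
        clusterSettings.addSettingsUpdateConsumer(MY_LIMIT, newLimit -> System.out.println("limit is now " + newLimit));
    }
}
```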
                      * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer( diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 032027384f106..c2c6effc3336f 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -130,9 +130,12 @@ import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.node.NodeRoleSettings; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.rest.BaseRestHandler; import org.opensearch.script.ScriptService; @@ -287,6 +290,7 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -374,6 +378,8 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, + TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, + TransportSearchAction.SEARCH_QUERY_METRICS_ENABLED_SETTING, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, @@ -457,6 +463,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkService.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY, ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING, ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING, @@ -652,6 +659,10 @@ public void apply(Settings value, Settings current, Settings previous) { SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING, SegmentReplicationPressureService.MAX_ALLOWED_STALE_SHARDS, + // Settings related to resource trackers + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + // Settings related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, @@ -671,9 +682,16 @@ public void apply(Settings value, Settings current, 
Settings previous) { // Remote cluster state settings RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, - IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING ) ) ); @@ -692,6 +710,12 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING ), List.of(FeatureFlags.TELEMETRY), - List.of(TelemetrySettings.TRACER_ENABLED_SETTING, TelemetrySettings.TRACER_SAMPLER_PROBABILITY) + List.of( + TelemetrySettings.TRACER_ENABLED_SETTING, + TelemetrySettings.TRACER_SAMPLER_PROBABILITY, + TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING, + TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING, + TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING + ) ); } diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 90abc0a0765c1..387b0c9753574 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -39,7 +39,8 @@ protected FeatureFlagSettings( FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, - FeatureFlags.TELEMETRY_SETTING + FeatureFlags.TELEMETRY_SETTING, + FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 5b2afc44600bd..62e8faf33e1fa 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -45,9 +45,11 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexingSlowLog; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.LogByteSizeMergePolicyProvider; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.SearchSlowLog; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.fielddata.IndexFieldDataService; @@ -120,14 +122,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - 
MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, IndexSortConfig.INDEX_SORT_MISSING_SETTING, @@ -202,6 +204,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, + IndexSettings.INDEX_MERGE_POLICY, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, IndexSettings.DEFAULT_SEARCH_PIPELINE, // Settings for Searchable Snapshots @@ -212,6 +221,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, // Settings for remote store enablement IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, @@ -275,7 +285,7 @@ public boolean isPrivateSetting(String key) { case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: - case MergePolicyConfig.INDEX_MERGE_ENABLED: + case MergePolicyProvider.INDEX_MERGE_ENABLED: // we keep the shrink settings for BWC - this can be removed in 8.0 // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 case "index.shrink.source.uuid": diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index f25dd872fc703..1ad3b7ab8875a 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -88,7 +88,7 @@ /** * A disk based container for sensitive settings in OpenSearch. - * + *

                      * Loading a keystore has 2 phases. First, call {@link #load(Path)}. Then call * {@link #decrypt(char[])} with the keystore password, or an empty char array if * {@link #hasPassword()} is {@code false}. Loading and decrypting should happen @@ -147,7 +147,7 @@ private static class Entry { /** * The number of bits for the cipher key. - * + *

                      * Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits. * This can be increased to 256 bits once minimum java 9 is the minimum java version. * See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce @@ -234,7 +234,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { /** * Loads information about the OpenSearch keystore from the provided config directory. - * + *

                      * {@link #decrypt(char[])} must be called before reading or writing any entries. * Returns {@code null} if no keystore exists. */ @@ -358,7 +358,7 @@ private Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv) /** * Decrypts the underlying keystore data. - * + *

                      * This may only be called once. */ public void decrypt(char[] password) throws GeneralSecurityException, IOException { diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java index f2ccc01a4c7e6..1855270b016b3 100644 --- a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java @@ -45,7 +45,7 @@ /** * A secure setting. - * + *
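Putting the KeyStoreWrapper javadoc above together: phase one is `load(Path)`, phase two is a single call to `decrypt(char[])` (an empty array when `hasPassword()` is false). A sketch of reading the entry names, assuming the `getSettingNames()` accessor:

```java
import org.opensearch.common.settings.KeyStoreWrapper;

import java.nio.file.Path;
import java.util.Set;

final class KeystoreRead {
    static Set<String> listEntries(Path configDir) throws Exception {
        // Phase 1: read the keystore file; returns null if no keystore exists.
        KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir);
        if (keystore == null) {
            return Set.of();
        }
        // Phase 2: decrypt with the keystore password, or an empty char[] if none was set.
        // decrypt(...) may only be called once per loaded instance.
        keystore.decrypt(keystore.hasPassword() ? System.console().readPassword() : new char[0]);
        return keystore.getSettingNames();
    }
}
```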

                      * This class allows access to settings from the OpenSearch keystore. * * @opensearch.internal @@ -152,7 +152,7 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett /** * A setting which contains a sensitive string. - * + *

                      * This may be any sensitive string, e.g. a username, a password, an auth token, etc. */ public static Setting secureString(String name, Setting fallback, Property... properties) { @@ -179,7 +179,7 @@ public static Setting insecureString(String name, String secureNam /** * A setting which contains a file. Reading the setting opens an input stream to the file. - * + *

                      * This may be any sensitive file, e.g. a set of credentials normally in plaintext. */ public static Setting secureFile(String name, Setting fallback, Property... properties) { diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 91e39e38f0379..0557884f0f8ad 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -101,7 +101,7 @@ @PublicApi(since = "1.0.0") public final class Settings implements ToXContentFragment { - public static final Settings EMPTY = new Builder().build(); + public static final Settings EMPTY = new Settings(Collections.emptyMap(), null); /** The raw settings from the full key to raw string value. */ private final Map settings; @@ -757,7 +757,7 @@ public Set keySet() { @PublicApi(since = "1.0.0") public static class Builder { - public static final Settings EMPTY_SETTINGS = new Builder().build(); + public static final Settings EMPTY_SETTINGS = Settings.EMPTY; // we use a sorted map for consistent serialization when using getAsMap() private final Map map = new TreeMap<>(); @@ -1222,7 +1222,7 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { /** * Checks that all settings(except archived settings and wildcards) in the builder start with the specified prefix. - * + *

                      * If a setting doesn't start with the prefix, the builder appends the prefix to such setting. */ public Builder normalizePrefix(String prefix) { diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatter.java b/server/src/main/java/org/opensearch/common/time/DateFormatter.java index d57fd441b9bf4..c98bd853dfced 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatter.java @@ -126,6 +126,14 @@ default String formatJoda(DateTime dateTime) { */ String pattern(); + /** + * A name based format for this formatter. Can be one of the registered formatters like epoch_millis or + * a configured format like HH:mm:ss + * + * @return The name of this formatter + */ + String printPattern(); + /** * Returns the configured locale of the date formatter * @@ -147,7 +155,7 @@ default String formatJoda(DateTime dateTime) { */ DateMathParser toDateMathParser(); - static DateFormatter forPattern(String input) { + static DateFormatter forPattern(String input, String printPattern, Boolean canCacheFormatter) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); @@ -158,7 +166,28 @@ static DateFormatter forPattern(String input) { List patterns = splitCombinedPatterns(format); List formatters = patterns.stream().map(DateFormatters::forPattern).collect(Collectors.toList()); - return JavaDateFormatter.combined(input, formatters); + DateFormatter printFormatter = formatters.get(0); + if (Strings.hasLength(printPattern)) { + String printFormat = strip8Prefix(printPattern); + try { + printFormatter = DateFormatters.forPattern(printFormat); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Invalid print format: " + e.getMessage(), e); + } + } + return JavaDateFormatter.combined(input, formatters, printFormatter, canCacheFormatter); + } + + static DateFormatter forPattern(String input) { + return forPattern(input, null, false); + } + + static DateFormatter forPattern(String input, String printPattern) { + return forPattern(input, printPattern, false); + } + + static DateFormatter forPattern(String input, Boolean canCacheFormatter) { + return forPattern(input, null, canCacheFormatter); } static String strip8Prefix(String input) { diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatters.java b/server/src/main/java/org/opensearch/common/time/DateFormatters.java index 6c8b9282d8a77..e74ab687b903b 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatters.java @@ -2172,10 +2172,10 @@ static DateFormatter forPattern(String input) { * or Instant.from(accessor). This results in a huge performance penalty and should be prevented * This method prevents exceptions by querying the accessor for certain capabilities * and then act on it accordingly - * + *
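The new `DateFormatter.forPattern` overloads in this diff take an optional print pattern and a caching flag (the flag only takes effect when the datetime formatter caching feature flag is enabled). A usage sketch based on the signatures shown above, using `DateFormatters.from` to resolve the parsed accessor:

```java
import org.opensearch.common.time.DateFormatter;
import org.opensearch.common.time.DateFormatters;

import java.time.ZonedDateTime;

final class FormatterExample {
    static String roundTrip(String input) {
        // Parse with either pattern, but always print with "strict_date_optional_time";
        // the trailing flag allows the last successful parser to be cached when the
        // datetime_formatter_caching feature flag permits it.
        DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis", "strict_date_optional_time", true);
        ZonedDateTime parsed = DateFormatters.from(formatter.parse(input));
        return formatter.format(parsed);
    }
}
```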

                      * This action assumes that we can reliably fall back to some defaults if not all parts of a * zoned date time are set - * + *

                      * - If a zoned date time is passed, it is returned * - If no timezone is found, ZoneOffset.UTC is used * - If we find a time and a date, converting to a ZonedDateTime is straight forward, diff --git a/server/src/main/java/org/opensearch/common/time/DateMathParser.java b/server/src/main/java/org/opensearch/common/time/DateMathParser.java index f6573eaa90286..7088d6cb7a498 100644 --- a/server/src/main/java/org/opensearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/DateMathParser.java @@ -64,12 +64,12 @@ default Instant parse(String text, LongSupplier now, boolean roundUpProperty, Da /** * Parse text, that potentially contains date math into the milliseconds since the epoch - * + *

                      * Examples are - * + *

                      * 2014-11-18||-2y subtracts two years from the input date * now/m rounds the current time to minute granularity - * + *

                      * Supported rounding units are * y year * M month diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 021b8a3be8b23..7ab395a1117e7 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -342,7 +342,7 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) { /** * Rounds the given utc milliseconds sicne the epoch down to the next unit millis - * + *
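The date-math syntax documented above (an anchor date or `now`, then `||` and +/-/rounding operations) is exercised through a formatter's `toDateMathParser()`. A sketch, assuming the `parse(text, now, roundUp, ZoneId)` variant of the interface:

```java
import org.opensearch.common.time.DateFormatter;
import org.opensearch.common.time.DateMathParser;

import java.time.Instant;
import java.time.ZoneOffset;

final class DateMathExample {
    static void examples() {
        DateMathParser parser = DateFormatter.forPattern("strict_date_optional_time||epoch_millis").toDateMathParser();
        long nowMillis = 1700000000000L; // fixed "now" for reproducibility

        // "2014-11-18||-2y": two years before the anchor date.
        Instant twoYearsBefore = parser.parse("2014-11-18||-2y", () -> nowMillis, false, ZoneOffset.UTC);

        // "now/m": the current time rounded down to minute granularity.
        Instant thisMinute = parser.parse("now/m", () -> nowMillis, false, ZoneOffset.UTC);

        System.out.println(twoYearsBefore + " " + thisMinute);
    }
}
```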

                      * Note: This does not check for correctness of the result, as this only works with units smaller or equal than a day * In order to ensure the performance of this methods, there are no guards or checks in it * diff --git a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java index f3459a5857b9e..7fc39e063efb5 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java @@ -32,12 +32,12 @@ * This class has been copied from different locations within the joda time package, as * these methods fast when used for rounding, as they do not require conversion to java * time objects - * + *

                      * This code has been copied from jodatime 2.10.1 * The source can be found at https://github.com/JodaOrg/joda-time/tree/v2.10.1 - * + *

                      * See following methods have been copied (along with required helper variables) - * + *

                      * - org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis(int year) * - org.joda.time.chrono.BasicChronology.getYear(int year) * - org.joda.time.chrono.BasicGJChronology.getMonthOfYear(long utcMillis, int year) diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java index 07013a3dc75f2..f711b14aeb928 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java @@ -32,6 +32,7 @@ package org.opensearch.common.time; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import java.text.ParsePosition; @@ -51,6 +52,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -67,9 +69,12 @@ class JavaDateFormatter implements DateFormatter { } private final String format; + private final String printFormat; private final DateTimeFormatter printer; private final List parsers; private final JavaDateFormatter roundupParser; + private final Boolean canCacheLastParsedFormatter; + private volatile DateTimeFormatter lastParsedformatter = null; /** * A round up formatter @@ -93,8 +98,18 @@ JavaDateFormatter getRoundupParser() { } // named formatters use default roundUpParser + JavaDateFormatter( + String format, + String printFormat, + DateTimeFormatter printer, + Boolean canCacheLastParsedFormatter, + DateTimeFormatter... parsers + ) { + this(format, printFormat, printer, ROUND_UP_BASE_FIELDS, canCacheLastParsedFormatter, parsers); + } + JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { - this(format, printer, ROUND_UP_BASE_FIELDS, parsers); + this(format, format, printer, false, parsers); } private static final BiConsumer ROUND_UP_BASE_FIELDS = (builder, parser) -> { @@ -111,8 +126,10 @@ JavaDateFormatter getRoundupParser() { // subclasses override roundUpParser JavaDateFormatter( String format, + String printFormat, DateTimeFormatter printer, BiConsumer roundupParserConsumer, + Boolean canCacheLastParsedFormatter, DateTimeFormatter... parsers ) { if (printer == null) { @@ -128,6 +145,8 @@ JavaDateFormatter getRoundupParser() { } this.printer = printer; this.format = format; + this.printFormat = printFormat; + this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; if (parsers.length == 0) { this.parsers = Collections.singletonList(printer); @@ -138,6 +157,15 @@ JavaDateFormatter getRoundupParser() { this.roundupParser = new RoundUpFormatter(format, roundUp); } + JavaDateFormatter( + String format, + DateTimeFormatter printer, + BiConsumer roundupParserConsumer, + DateTimeFormatter... parsers + ) { + this(format, format, printer, roundupParserConsumer, false, parsers); + } + /** * This is when the RoundUp Formatters are created. In further merges (with ||) it will only append them to a list. * || is not expected to be provided as format when a RoundUp formatter is created. 
It will be splitted before in @@ -164,36 +192,61 @@ private List createRoundUpParser( return null; } - public static DateFormatter combined(String input, List formatters) { + public static DateFormatter combined( + String input, + List formatters, + DateFormatter printFormatter, + Boolean canCacheLastParsedFormatter + ) { assert formatters.size() > 0; + assert printFormatter != null; List parsers = new ArrayList<>(formatters.size()); List roundUpParsers = new ArrayList<>(formatters.size()); - DateTimeFormatter printer = null; + assert printFormatter instanceof JavaDateFormatter; + JavaDateFormatter javaPrintFormatter = (JavaDateFormatter) printFormatter; + DateTimeFormatter printer = javaPrintFormatter.getPrinter(); for (DateFormatter formatter : formatters) { assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; - if (printer == null) { - printer = javaDateFormatter.getPrinter(); - } parsers.addAll(javaDateFormatter.getParsers()); roundUpParsers.addAll(javaDateFormatter.getRoundupParser().getParsers()); } - return new JavaDateFormatter(input, printer, roundUpParsers, parsers); + return new JavaDateFormatter( + input, + javaPrintFormatter.format, + printer, + roundUpParsers, + parsers, + canCacheLastParsedFormatter & FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING) + ); // check if caching is enabled } private JavaDateFormatter( String format, + String printFormat, DateTimeFormatter printer, List roundUpParsers, - List parsers + List parsers, + Boolean canCacheLastParsedFormatter ) { this.format = format; + this.printFormat = printFormat; this.printer = printer; this.roundupParser = roundUpParsers != null ? new RoundUpFormatter(format, roundUpParsers) : null; this.parsers = parsers; + this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; + } + + private JavaDateFormatter( + String format, + DateTimeFormatter printer, + List roundUpParsers, + List parsers + ) { + this(format, format, printer, roundUpParsers, parsers, false); } JavaDateFormatter getRoundupParser() { @@ -222,7 +275,7 @@ public TemporalAccessor parse(String input) { * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject * which does not throw exceptions when parsing failed. - * + *

                      * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) * patterns. Joda does not suffer from this. * https://bugs.openjdk.java.net/browse/JDK-8188771 @@ -233,13 +286,23 @@ public TemporalAccessor parse(String input) { */ private TemporalAccessor doParse(String input) { if (parsers.size() > 1) { + Object object = null; + if (canCacheLastParsedFormatter && lastParsedformatter != null) { + ParsePosition pos = new ParsePosition(0); + object = lastParsedformatter.toFormat().parseObject(input, pos); + if (parsingSucceeded(object, input, pos)) { + return (TemporalAccessor) object; + } + } for (DateTimeFormatter formatter : parsers) { ParsePosition pos = new ParsePosition(0); - Object object = formatter.toFormat().parseObject(input, pos); + object = formatter.toFormat().parseObject(input, pos); if (parsingSucceeded(object, input, pos)) { + lastParsedformatter = formatter; return (TemporalAccessor) object; } } + throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); } return this.parsers.get(0).parse(input); @@ -255,12 +318,14 @@ public DateFormatter withZone(ZoneId zoneId) { if (zoneId.equals(zone())) { return this; } - List parsers = this.parsers.stream().map(p -> p.withZone(zoneId)).collect(Collectors.toList()); + List parsers = new CopyOnWriteArrayList<>( + this.parsers.stream().map(p -> p.withZone(zoneId)).collect(Collectors.toList()) + ); List roundUpParsers = this.roundupParser.getParsers() .stream() .map(p -> p.withZone(zoneId)) .collect(Collectors.toList()); - return new JavaDateFormatter(format, printer.withZone(zoneId), roundUpParsers, parsers); + return new JavaDateFormatter(format, printFormat, printer.withZone(zoneId), roundUpParsers, parsers, canCacheLastParsedFormatter); } @Override @@ -269,12 +334,14 @@ public DateFormatter withLocale(Locale locale) { if (locale.equals(locale())) { return this; } - List parsers = this.parsers.stream().map(p -> p.withLocale(locale)).collect(Collectors.toList()); + List parsers = new CopyOnWriteArrayList<>( + this.parsers.stream().map(p -> p.withLocale(locale)).collect(Collectors.toList()) + ); List roundUpParsers = this.roundupParser.getParsers() .stream() .map(p -> p.withLocale(locale)) .collect(Collectors.toList()); - return new JavaDateFormatter(format, printer.withLocale(locale), roundUpParsers, parsers); + return new JavaDateFormatter(format, printFormat, printer.withLocale(locale), roundUpParsers, parsers, canCacheLastParsedFormatter); } @Override @@ -287,6 +354,11 @@ public String pattern() { return format; } + @Override + public String printPattern() { + return printFormat; + } + @Override public Locale locale() { return this.printer.getLocale(); diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java index 0536324b6516b..340901e7ac8e2 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java @@ -51,7 +51,7 @@ /** * A parser for date/time formatted text with optional date math. - * + *

                      * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. diff --git a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java index ca63c170c0ccd..e4315f8699206 100644 --- a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java +++ b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java @@ -34,14 +34,14 @@ /** * Performs binary search on an arbitrary data structure. - * + *

                      * To do a search, create a subclass and implement custom {@link #compare(int)} and {@link #distance(int)} methods. - * + *

                      * {@link BinarySearcher} knows nothing about the value being searched for or the underlying data structure. * These things should be determined by the subclass in its overridden methods. - * + *

                      * Refer to {@link BigArrays.DoubleBinarySearcher} for an example. - * + *

                      * NOTE: this class is not thread safe * * @opensearch.internal @@ -74,7 +74,7 @@ private int getClosestIndex(int index1, int index2) { /** * Uses a binary search to determine the index of the element within the index range {from, ... , to} that is * closest to the search value. - * + *

                      * Unlike most binary search implementations, the value being searched for is not an argument to search method. * Rather, this value should be stored by the subclass along with the underlying array. * diff --git a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java index 8bc3ca3affb12..67dd4b848f4c0 100644 --- a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java @@ -45,7 +45,7 @@ * A utility class for multi threaded operation that needs to be cancellable via interrupts. Every cancellable operation should be * executed via {@link #execute(Interruptible)}, which will capture the executing thread and make sure it is interrupted in the case * of cancellation. - * + *
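As the BinarySearcher javadoc above explains, a subclass implements `compare(int)` and `distance(int)` over its own data and stores the searched-for value itself instead of passing it to the search method. A hypothetical subclass over a plain `long[]`; the `search(from, to)` entry point and inclusive bounds are assumptions:

```java
final class LongArraySearcher extends org.opensearch.common.util.BinarySearcher {
    private final long[] values; // must be sorted ascending
    private long target;         // the value being searched for, held by the subclass

    LongArraySearcher(long[] values) {
        this.values = values;
    }

    int closestIndexTo(long target) {
        this.target = target;
        return search(0, values.length - 1);
    }

    @Override
    protected int compare(int index) {
        // Negative, zero, or positive depending on how values[index] compares to the target.
        return Long.compare(values[index], target);
    }

    @Override
    protected double distance(int index) {
        return Math.abs(values[index] - (double) target);
    }
}
```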

                      * Cancellation policy: This class does not support external interruption via Thread#interrupt(). Always use #cancel() instead. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java index 0c792b37ccfa9..28b55f70855d6 100644 --- a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java @@ -46,33 +46,33 @@ /** * An approximate set membership datastructure - * + *
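CancellableThreads, described above, wraps interruptible work so that cancellation happens through `cancel()` rather than a raw `Thread#interrupt()`. A brief sketch of the pattern:

```java
import org.opensearch.common.util.CancellableThreads;

final class CancellableWork {
    private final CancellableThreads cancellableThreads = new CancellableThreads();

    void doWork() {
        // The executing thread is captured so a concurrent cancel() can interrupt it safely.
        cancellableThreads.execute(() -> {
            Thread.sleep(1000); // stand-in for interruptible work
        });
    }

    void abort() {
        cancellableThreads.cancel("shutting down");
    }
}
```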

                      * CuckooFilters are similar to Bloom Filters in usage; values are inserted, and the Cuckoo * can be asked if it has seen a particular value before. Because the structure is approximate, * it can return false positives (says it has seen an item when it has not). False negatives * are not possible though; if the structure says it _has not_ seen an item, that can be * trusted. - * + *

                      * The filter can "saturate" at which point the map has hit it's configured load factor (or near enough * that a large number of evictions are not able to find a free slot) and will refuse to accept * any new insertions. - * + *

                      * NOTE: this version does not support deletions, and as such does not save duplicate * fingerprints (e.g. when inserting, if the fingerprint is already present in the * candidate buckets, it is not inserted). By not saving duplicates, the CuckooFilter * loses the ability to delete values. But not by allowing deletions, we can save space * (do not need to waste slots on duplicate fingerprints), and we do not need to worry * about inserts "overflowing" a bucket because the same item has been repeated repeatedly - * + *

                      * NOTE: this CuckooFilter exposes a number of Expert APIs which assume the caller has * intimate knowledge about how the algorithm works. It is recommended to use * {@link SetBackedScalingCuckooFilter} instead. - * + *

                      * Based on the paper: - * + *

                      * Fan, Bin, et al. "Cuckoo filter: Practically better than bloom." * Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014. - * + *

                      * https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf * * @opensearch.internal @@ -200,7 +200,7 @@ public int getCount() { /** * Returns the number of buckets that has been chosen based * on the initial configuration - * + *

                      * Expert-level API */ int getNumBuckets() { @@ -209,7 +209,7 @@ int getNumBuckets() { /** * Returns the number of bits used per entry - * + *

                      * Expert-level API */ int getBitsPerEntry() { @@ -220,7 +220,7 @@ int getBitsPerEntry() { * Returns the cached fingerprint mask. This is simply a mask for the * first bitsPerEntry bits, used by {@link CuckooFilter#fingerprint(int, int, int)} * to generate the fingerprint of a hash - * + *

                      * Expert-level API */ int getFingerprintMask() { @@ -230,7 +230,7 @@ int getFingerprintMask() { /** * Returns an iterator that returns the long[] representation of each bucket. The value * inside each long will be a fingerprint (or 0L, representing empty). - * + *

                      * Expert-level API */ Iterator getBuckets() { @@ -267,7 +267,7 @@ boolean mightContain(long hash) { /** * Returns true if the bucket or it's alternate bucket contains the fingerprint. - * + *

                      * Expert-level API, use {@link CuckooFilter#mightContain(long)} to check if * a value is in the filter. */ @@ -307,7 +307,7 @@ boolean add(long hash) { /** * Attempts to merge the fingerprint into the specified bucket or it's alternate bucket. * Returns true if the insertion was successful, false if the filter is saturated. - * + *

                      * Expert-level API, use {@link CuckooFilter#add(long)} to insert * values into the filter */ @@ -351,7 +351,7 @@ boolean mergeFingerprint(int bucket, int fingerprint) { * Low-level insert method. Attempts to write the fingerprint into an empty entry * at this bucket's position. Returns true if that was sucessful, false if all entries * were occupied. - * + *

                      * If the fingerprint already exists in one of the entries, it will not duplicate the * fingerprint like the original paper. This means the filter _cannot_ support deletes, * but is not sensitive to "overflowing" buckets with repeated inserts @@ -376,10 +376,10 @@ private boolean tryInsert(int bucket, int fingerprint) { /** * Converts a hash into a bucket index (primary or alternate). - * + *

                      * If the hash is negative, this flips the bits. The hash is then modulo numBuckets * to get the final index. - * + *

                      * Expert-level API */ static int hashToIndex(int hash, int numBuckets) { @@ -388,16 +388,16 @@ static int hashToIndex(int hash, int numBuckets) { /** * Calculates the alternate bucket for a given bucket:fingerprint tuple - * + *

                      * The alternate bucket is the fingerprint multiplied by a mixing constant, * then xor'd against the bucket. This new value is modulo'd against * the buckets via {@link CuckooFilter#hashToIndex(int, int)} to get the final * index. - * + *

                      * Note that the xor makes this operation reversible as long as we have the * fingerprint and current bucket (regardless of if that bucket was the primary * or alternate). - * + *

                      * Expert-level API */ static int alternateIndex(int bucket, int fingerprint, int numBuckets) { @@ -424,10 +424,10 @@ private int getOffset(int bucket, int position) { /** * Calculates the fingerprint for a given hash. - * + *
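The reversibility described above (xor the bucket with a hash of the fingerprint, mask down to the power-of-two bucket count, and applying the same step again returns the original bucket) is easy to verify in isolation. A standalone sketch of that scheme with an illustrative mixing constant; the real class uses its own constant and additional validation:

```java
final class AlternateIndexDemo {
    private static final int MIX = 0x5bd1e995; // illustrative mixing constant

    // Map an arbitrary hash onto [0, numBuckets) where numBuckets is a power of two.
    static int hashToIndex(int hash, int numBuckets) {
        return hash & (numBuckets - 1);
    }

    // The alternate bucket: hash the fingerprint, xor against the current bucket, mask.
    static int alternateIndex(int bucket, int fingerprint, int numBuckets) {
        return hashToIndex(bucket ^ (fingerprint * MIX), numBuckets);
    }

    public static void main(String[] args) {
        int numBuckets = 1 << 10;
        int bucket = 421;
        int fingerprint = 0x2f;
        int alt = alternateIndex(bucket, fingerprint, numBuckets);
        int back = alternateIndex(alt, fingerprint, numBuckets);
        // back == bucket: the same transformation flips between primary and alternate bucket.
        System.out.println(bucket + " -> " + alt + " -> " + back);
    }
}
```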

                      * The fingerprint is simply the first `bitsPerEntry` number of bits that are non-zero. * If the entire hash is zero, `(int) 1` is used - * + *

                      * Expert-level API */ static int fingerprint(int hash, int bitsPerEntry, int fingerprintMask) { @@ -501,7 +501,7 @@ private double getLoadFactor(int b) { * Calculates the optimal number of buckets for this filter. The xor used in the bucketing * algorithm requires this to be a power of two, so the optimal number of buckets will * be rounded to the next largest power of two where applicable. - * + *

                      * TODO: there are schemes to avoid powers of two, might want to investigate those */ private int getNumBuckets(long capacity, double loadFactor, int b) { diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index b89d2d0549823..4e9b417e3433b 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -55,6 +55,11 @@ public class FeatureFlags { */ public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + /** + * Gates the optimization of datetime formatters caching along with change in default datetime formatter. + */ + public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; + /** * Should store the settings from opensearch.yml. */ @@ -83,6 +88,17 @@ public static boolean isEnabled(String featureFlagName) { return settings != null && settings.getAsBoolean(featureFlagName, false); } + public static boolean isEnabled(Setting featureFlag) { + if ("true".equalsIgnoreCase(System.getProperty(featureFlag.getKey()))) { + // TODO: Remove the if condition once FeatureFlags are only supported via opensearch.yml + return true; + } else if (settings != null) { + return featureFlag.get(settings); + } else { + return featureFlag.getDefault(Settings.EMPTY); + } + } + public static final Setting SEGMENT_REPLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( SEGMENT_REPLICATION_EXPERIMENTAL, false, @@ -100,4 +116,10 @@ public static boolean isEnabled(String featureFlagName) { false, Property.NodeScope ); + + public static final Setting DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( + DATETIME_FORMATTER_CACHING, + true, + Property.NodeScope + ); } diff --git a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java index e99eb751babe8..a635160844159 100644 --- a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java @@ -75,7 +75,7 @@ public class SetBackedScalingCuckooFilter implements Writeable { * This set is used to track the insertions before we convert over to an approximate * filter. This gives us 100% accuracy for small cardinalities. This will be null * if isSetMode = false; - * + *

                      * package-private for testing */ Set hashes; @@ -178,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * Registers a circuit breaker with the datastructure. - * + *

                      * CuckooFilter's can "saturate" and refuse to accept any new values. When this happens, * the datastructure scales by adding a new filter. This new filter's bytes will be tracked * in the registered breaker when configured. diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java index d2e7e836bf07f..a9ebb86eed8a2 100644 --- a/server/src/main/java/org/opensearch/common/util/TokenBucket.java +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -20,7 +20,7 @@ public class TokenBucket { /** * Defines a monotonically increasing counter. - * + *

                      * Usage examples: * 1. clock = System::nanoTime can be used to perform rate-limiting per unit time * 2. clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations diff --git a/server/src/main/java/org/opensearch/common/util/URIPattern.java b/server/src/main/java/org/opensearch/common/util/URIPattern.java index a3c385e5ea660..49e4b53e20740 100644 --- a/server/src/main/java/org/opensearch/common/util/URIPattern.java +++ b/server/src/main/java/org/opensearch/common/util/URIPattern.java @@ -39,9 +39,9 @@ /** * URI Pattern matcher - * + *
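TokenBucket's clock, as the examples above note, can be wall-clock time (`System::nanoTime`) for per-time rate limiting or an operation counter for per-N-operations limiting. A usage sketch, assuming a (clock, rate, burst) constructor and a `request()` method that reports whether a token was available:

```java
import org.opensearch.common.util.TokenBucket;

final class RateLimiterExample {
    // Roughly 5 permits per second of clock time (rate is tokens per nanosecond here),
    // allowing short bursts of up to 10.
    private final TokenBucket perSecond = new TokenBucket(System::nanoTime, 5.0 / 1_000_000_000, 10.0);

    boolean tryOperation() {
        // Consumes a token if one is available and reports whether it succeeded.
        return perSecond.request();
    }
}
```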

                      * The pattern is URI in which authority, path, query and fragment can be replace with simple pattern. - * + *

                      * For example: foobar://*.local/some_path/*?*#* will match all uris with schema foobar in local domain * with any port, with path that starts some_path and with any query and fragment. * diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java index b1f4714a90e8e..4357254176358 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java @@ -73,7 +73,7 @@ public void addListener(ActionListener listener, ExecutorService executor) { * notified of a response or exception in a runnable submitted to the ExecutorService provided. * If the future has completed, the listener will be notified immediately without forking to * a different thread. - * + *
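The URIPattern example above ("foobar://*.local/some_path/*?*#*") matches any foobar-scheme URI in the local domain whose path starts with some_path, regardless of port, query, or fragment. A quick sketch, assuming a `match(URI)` method:

```java
import org.opensearch.common.util.URIPattern;

import java.net.URI;

final class UriPatternExample {
    public static void main(String[] args) {
        URIPattern pattern = new URIPattern("foobar://*.local/some_path/*?*#*");
        // Any host under .local, a path under /some_path, any query and fragment.
        System.out.println(pattern.match(URI.create("foobar://node1.local/some_path/deep/file?x=1#top")));
    }
}
```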

                      * It will apply the provided ThreadContext (if not null) when executing the listening. */ public void addListener(ActionListener listener, ExecutorService executor, ThreadContext threadContext) { diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java index b4673d9534922..95df4486b9d7b 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java @@ -118,8 +118,9 @@ private void addPending(List runnables, List pending, boolean TieBreakingPrioritizedRunnable t = (TieBreakingPrioritizedRunnable) runnable; Runnable innerRunnable = t.runnable; if (innerRunnable != null) { - /** innerRunnable can be null if task is finished but not removed from executor yet, - * see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} + /* + innerRunnable can be null if task is finished but not removed from executor yet, + see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} */ pending.add(new Pending(super.unwrap(innerRunnable), t.priority(), t.insertionOrder, executing)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 4888d25e4a640..fc2e4217bae79 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -143,10 +143,10 @@ public void unregisterThreadContextStatePropagator(final ThreadContextStatePropa */ public StoredContext stashContext() { final ThreadContextStruct context = threadLocal.get(); - /** - * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. - * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. - * Otherwise when context is stash, it should be empty. + /* + X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. + This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + Otherwise when context is stash, it should be empty. */ ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putPersistent(context.persistentHeaders); diff --git a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java index 06d139fa93195..05fc968737394 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java +++ b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java @@ -54,7 +54,7 @@ public class LoggingDeprecationHandler implements DeprecationHandler { public static final LoggingDeprecationHandler INSTANCE = new LoggingDeprecationHandler(); /** * The logger to which to send deprecation messages. - * + *

                      * This uses ParseField's logger because that is the logger that * we have been using for many releases for deprecated fields. * Changing that will require some research to make super duper diff --git a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java index 798a58551457f..17bb0a1de267b 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java @@ -494,7 +494,7 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon /** * Returns the contents of an object as an unparsed BytesReference - * + *
<p>
                      * This is useful for things like mappings where we're copying bytes around but don't * actually need to parse their contents, and so avoids building large maps of maps * unnecessarily diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java index adfa871cbfcbe..a87edbb949d39 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java @@ -117,12 +117,11 @@ private static void extractRawValues(List values, List part, String[] pa /** * For the provided path, return its value in the xContent map. - * + *
<p>
                      * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * * @param path the value's path in the map. - * * @return the value associated with the path in the map or 'null' if the path does not exist. */ public static Object extractValue(String path, Map map) { @@ -138,7 +137,7 @@ public static Object extractValue(Map map, String... pathElements) { /** * For the provided path, return its value in the xContent map. - * + *
<p>
                      * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * @@ -197,13 +196,13 @@ private static Object extractValue(String[] pathElements, int index, Object curr * Only keep properties in {@code map} that match the {@code includes} but * not the {@code excludes}. An empty list of includes is interpreted as a * wildcard while an empty list of excludes does not match anything. - * + *
<p>
                      * If a property matches both an include and an exclude, then the exclude * wins. - * + *
<p>
                      * If an object matches, then any of its sub properties are automatically * considered as matching as well, both for includes and excludes. - * + *
<p>
                      * Dots in field names are treated as sub objects. So for instance if a * document contains {@code a.b} as a property and {@code a} is an include, * then {@code a.b} will be kept in the filtered map. @@ -555,7 +554,7 @@ public static Map nodeMapValue(Object node, String desc) { /** * Returns an array of string value from a node value. - * + *
<p>
                      * If the node represents an array the corresponding array of strings is returned. * Otherwise the node is treated as a comma-separated string. */ diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java index 665ecf77d7aa7..fb341ac2ac569 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java @@ -32,8 +32,10 @@ package org.opensearch.discovery; +import org.opensearch.Version; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,21 +53,31 @@ public class DiscoveryStats implements Writeable, ToXContentFragment { private final PendingClusterStateStats queueStats; private final PublishClusterStateStats publishStats; + private final ClusterStateStats clusterStateStats; - public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats) { + public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats, ClusterStateStats clusterStateStats) { this.queueStats = queueStats; this.publishStats = publishStats; + this.clusterStateStats = clusterStateStats; } public DiscoveryStats(StreamInput in) throws IOException { queueStats = in.readOptionalWriteable(PendingClusterStateStats::new); publishStats = in.readOptionalWriteable(PublishClusterStateStats::new); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + clusterStateStats = in.readOptionalWriteable(ClusterStateStats::new); + } else { + clusterStateStats = null; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueStats); out.writeOptionalWriteable(publishStats); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(clusterStateStats); + } } @Override @@ -77,6 +89,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (publishStats != null) { publishStats.toXContent(builder, params); } + if (clusterStateStats != null) { + clusterStateStats.toXContent(builder, params); + } builder.endObject(); return builder; } @@ -92,4 +107,8 @@ public PendingClusterStateStats getQueueStats() { public PublishClusterStateStats getPublishStats() { return publishStats; } + + public ClusterStateStats getClusterStateStats() { + return clusterStateStats; + } } diff --git a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java index 3159733336057..b663227978e8f 100644 --- a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java @@ -48,12 +48,12 @@ /** * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from {@link #UNICAST_HOSTS_FILE}. - * + *
<p>
                      * Each host/port that is part of the discovery process must be listed on * a separate line. If the port is left off an entry, we default to the * first port in the {@code transport.port} range. * An example unicast hosts file could read: - * + *
<p>
                      * 67.81.244.10 * 67.81.244.11:9305 * 67.81.244.15:9400 diff --git a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java index 9785d5b21078e..10185322c2ca6 100644 --- a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java @@ -49,7 +49,7 @@ * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from the "discovery.seed_hosts" node setting. If the port is * left off an entry, we default to the first port in the {@code transport.port} range. - * + *
<p>
                      * An example setting might look as follows: * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] * diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index 3b87c756ffdae..5a40e45fb22a6 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -249,7 +249,7 @@ public Path[] repoFiles() { /** * Resolves the specified location against the list of configured repository roots - * + *
<p>
                      * If the specified location doesn't match any of the roots, returns null. */ public Path resolveRepoFile(String location) { @@ -259,7 +259,7 @@ public Path resolveRepoFile(String location) { /** * Checks if the specified URL is pointing to the local file system and if it does, resolves the specified url * against the list of configured repository roots - * + *
<p>
                      * If the specified url doesn't match any of the roots, returns null. */ public URL resolveRepoURL(URL url) { diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 3c5ab5ba98875..40c5722308b79 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -705,7 +705,7 @@ public List lockAllForIndex( * write operation on a shards data directory like deleting files, creating a new index writer * or recover from a different shard instance into it. If the shard lock can not be acquired * a {@link ShardLockObtainFailedException} is thrown. - * + *
<p>
                      * Note: this method will return immediately if the lock can't be acquired. * * @param id the shard ID to lock @@ -778,7 +778,7 @@ public interface ShardLocker { /** * Returns all currently lock shards. - * + *
<p>
                      * Note: the shard ids return do not contain a valid Index UUID */ public Set lockedShards() { diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java index 4556d6537ffdc..fc2ba817ace5f 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java @@ -17,7 +17,7 @@ * a category class used to identify the reader defined within the JVM that the extension is running on. * Additionally, this method takes in the extension's corresponding DiscoveryNode and a byte array (context) that the * extension's reader will be applied to. - * + *
<p>
                      * By convention the extensions' reader is a constructor that takes StreamInput as an argument for most classes and a static method for things like enums. * Classes will implement this via a constructor (or a static method in the case of enumerations), it's something that should * look like: diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 9f9ba548143c6..b531abcb845d7 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -300,7 +300,7 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) * Loads a single extension * @param extension The extension to be loaded */ - public void loadExtension(Extension extension) throws IOException { + public DiscoveryExtensionNode loadExtension(Extension extension) throws IOException { validateExtension(extension); DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( extension.getName(), @@ -314,6 +314,12 @@ public void loadExtension(Extension extension) throws IOException { extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); extensionSettingsMap.put(extension.getUniqueId(), extension); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + return discoveryExtensionNode; + } + + public void initializeExtension(Extension extension) throws IOException { + DiscoveryExtensionNode node = loadExtension(extension); + initializeExtensionNode(node); } private void validateField(String fieldName, String value) throws IOException { @@ -340,11 +346,11 @@ private void validateExtension(Extension extension) throws IOException { */ public void initialize() { for (DiscoveryExtensionNode extension : extensionIdMap.values()) { - initializeExtension(extension); + initializeExtensionNode(extension); } } - private void initializeExtension(DiscoveryExtensionNode extension) { + public void initializeExtensionNode(DiscoveryExtensionNode extensionNode) { final CompletableFuture inProgressFuture = new CompletableFuture<>(); final TransportResponseHandler initializeExtensionResponseHandler = new TransportResponseHandler< @@ -384,7 +390,8 @@ public String executor() { transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - extensionIdMap.remove(extension.getId()); + logger.warn("Error registering extension: " + extensionNode.getId(), e); + extensionIdMap.remove(extensionNode.getId()); if (e.getCause() instanceof ConnectTransportException) { logger.info("No response from extension to request.", e); throw (ConnectTransportException) e.getCause(); @@ -399,11 +406,11 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - transportService.connectToExtensionNode(extension); + transportService.connectToExtensionNode(extensionNode); transportService.sendRequest( - extension, + extensionNode, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension, issueServiceAccount(extension)), + new InitializeExtensionRequest(transportService.getLocalNode(), extensionNode, issueServiceAccount(extensionNode)), initializeExtensionResponseHandler ); } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java 
b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index 97851cbd394a0..383796f0c3b44 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -62,6 +62,9 @@ public TransportResponse handleRegisterRestActionsRequest( DynamicActionRegistry dynamicActionRegistry ) throws Exception { DiscoveryExtensionNode discoveryExtensionNode = extensionIdMap.get(restActionsRequest.getUniqueId()); + if (discoveryExtensionNode == null) { + throw new IllegalStateException("Missing extension node for " + restActionsRequest.getUniqueId()); + } RestHandler handler = new RestSendToExtensionAction( restActionsRequest, discoveryExtensionNode, diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index 4b622b841a040..fc7c21a6eccd6 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -159,8 +159,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client extAdditionalSettings ); try { - extensionsManager.loadExtension(extension); - extensionsManager.initialize(); + extensionsManager.initializeExtension(extension); } catch (CompletionException e) { Throwable cause = e.getCause(); if (cause instanceof TimeoutException) { diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 33f44a913dd8a..41783b89ccc69 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -150,7 +150,7 @@ public RestSendToExtensionAction( @Override public String getName() { - return SEND_TO_EXTENSION_ACTION; + return this.discoveryExtensionNode.getId() + ":" + SEND_TO_EXTENSION_ACTION; } @Override diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 59ef894958cbe..853fe03904c53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -50,7 +50,7 @@ /** * An abstract class that implements basic functionality for allocating * shards to nodes based on shard copies that already exist in the cluster. - * + *
<p>
                      * Individual implementations of this class are responsible for providing * the logic to determine to which nodes (if any) those shards are allocated. * diff --git a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java index 4c562b348f141..1563ac84bdd1c 100644 --- a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.settings.ClusterSettings; @@ -121,21 +120,7 @@ static ClusterState updateRoutingTable(final ClusterState state) { // initialize all index routing tables as empty final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(state.routingTable()); for (final IndexMetadata cursor : state.metadata().indices().values()) { - // Whether IndexMetadata is recovered from local disk or remote it doesn't matter to us at this point. - // We are only concerned about index data recovery here. Which is why we only check for remote store enabled and not for remote - // cluster state enabled. - if (cursor.getSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) == false - || state.routingTable().hasIndex(cursor.getIndex()) == false - || state.routingTable() - .index(cursor.getIndex()) - .shardsMatchingPredicateCount( - shardRouting -> shardRouting.primary() - // We need to ensure atleast one of the primaries is being recovered from remote. - // This ensures we have gone through the RemoteStoreRestoreService and routing table is updated - && shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) == 0) { - routingTableBuilder.addAsRecovery(cursor); - } + routingTableBuilder.addAsRecovery(cursor); } // start with 0 based versions for routing table routingTableBuilder.version(0); diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index e7c1ba01e7920..48479691689e5 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -244,7 +244,7 @@ List filterDanglingIndices(Metadata metadata, Map * Dangling importing indices with aliases is dangerous, it could for instance result in inability to write to an existing alias if it * previously had only one index with any is_write_index indication. 
*/ diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 6b26af148b2ea..c3056276706a0 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.coordination.InMemoryPersistedState; import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Manifest; @@ -67,7 +68,6 @@ import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; -import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -91,7 +91,7 @@ /** * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts. - * + *
<p>
                      * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that the state being * loaded when constructing the instance of this class is not necessarily the state that will be used as {@link ClusterState#metadata()} because it might be * stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and non-stale state, and cluster-manager-ineligible nodes @@ -158,38 +158,46 @@ public void start( PersistedState remotePersistedState = null; boolean success = false; try { - ClusterState clusterState = prepareInitialClusterState( + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) + .version(lastAcceptedVersion) + .metadata(metadata) + .build(); + + if (DiscoveryNode.isClusterManagerNode(settings) && isRemoteStoreClusterStateEnabled(settings)) { + // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote + // If there is no valid state on remote, continue with initial empty state + // If there is a valid state, then restore index metadata using this state + String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; + if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { + lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + clusterState.getClusterName().value() + ); + if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { + // Load state from remote + final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( + // Remote Metadata should always override local disk Metadata + // if local disk Metadata's cluster uuid is UNKNOWN_UUID + ClusterState.builder(clusterState).metadata(Metadata.EMPTY_METADATA).build(), + lastKnownClusterUUID, + false, + new String[] {} + ); + clusterState = remoteRestoreResult.getClusterState(); + } + } + remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); + } + + // Recovers Cluster and Index level blocks + clusterState = prepareInitialClusterState( transportService, clusterService, - ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) - .version(lastAcceptedVersion) - .metadata(upgradeMetadataForNode(metadata, metadataIndexUpgradeService, metadataUpgrader)) + ClusterState.builder(clusterState) + .metadata(upgradeMetadataForNode(clusterState.metadata(), metadataIndexUpgradeService, metadataUpgrader)) .build() ); if (DiscoveryNode.isClusterManagerNode(settings)) { - if (isRemoteStoreClusterStateEnabled(settings)) { - // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote - // If there is no valid state on remote, continue with initial empty state - // If there is a valid state, then restore index metadata using this state - String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; - if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { - lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( - clusterState.getClusterName().value() - ); - if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { - // Load state from remote - final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( - clusterState, - lastKnownClusterUUID, - false, - new String[] {} - ); - clusterState = remoteRestoreResult.getClusterState(); - } - } - remotePersistedState = new 
RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); - } persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { persistedState = new AsyncLucenePersistedState( @@ -544,6 +552,9 @@ static class LucenePersistedState implements PersistedState { // out by this version of OpenSearch. TODO TBD should we avoid indexing when possible? final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); try { + // During remote state restore, there will be non empty metadata getting persisted with cluster UUID as + // ClusterState.UNKOWN_UUID . The valid UUID will be generated and persisted along with the first cluster state getting + // published. writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); } catch (Exception e) { try { @@ -605,6 +616,12 @@ public void setLastAcceptedState(ClusterState clusterState) { lastAcceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + // Note: These stats are not published yet, will come in future + return null; + } + private PersistedClusterStateService.Writer getWriterSafe() { final PersistedClusterStateService.Writer writer = persistenceWriter.get(); if (writer == null) { @@ -678,24 +695,21 @@ public void setLastAcceptedState(ClusterState clusterState) { try { final ClusterMetadataManifest manifest; if (shouldWriteFullClusterState(clusterState)) { - if (clusterState.metadata().clusterUUIDCommitted() == true) { - final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( - clusterState.getClusterName().value(), + final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + if (latestManifest.isPresent()) { + // The previous UUID should not change for the current UUID. So fetching the latest manifest + // from remote store and getting the previous UUID. + previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); + } else { + // When the user starts the cluster with remote state disabled but later enables the remote state, + // there will not be any manifest for the current cluster UUID. + logger.error( + "Latest manifest is not present in remote store for cluster UUID: {}", clusterState.metadata().clusterUUID() ); - if (latestManifest.isPresent()) { - // The previous UUID should not change for the current UUID. So fetching the latest manifest - // from remote store and getting the previous UUID. - previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); - } else { - // When the user starts the cluster with remote state disabled but later enables the remote state, - // there will not be any manifest for the current cluster UUID. 
- logger.error( - "Latest manifest is not present in remote store for cluster UUID: {}", - clusterState.metadata().clusterUUID() - ); - previousClusterUUID = ClusterState.UNKNOWN_UUID; - } } manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); } else { @@ -706,17 +720,17 @@ assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == assert verifyManifestAndClusterState(manifest, clusterState) == true : "Manifest and ClusterState are not in sync"; lastAcceptedManifest = manifest; lastAcceptedState = clusterState; - } catch (RepositoryMissingException e) { - // TODO This logic needs to be modified once PR for repo registration during bootstrap is pushed - // https://github.com/opensearch-project/OpenSearch/pull/9105/ - // After the above PR is pushed, we can remove this silent failure and throw the exception instead. - logger.error("Remote repository is not yet registered"); - lastAcceptedState = clusterState; } catch (Exception e) { + remoteClusterStateService.writeMetadataFailed(); handleExceptionOnWrite(e); } } + @Override + public PersistedStateStats getStats() { + return remoteClusterStateService.getStats(); + } + private boolean verifyManifestAndClusterState(ClusterMetadataManifest manifest, ClusterState clusterState) { assert manifest != null : "ClusterMetadataManifest is null"; assert clusterState != null : "ClusterState is null"; diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java index 8db4736bcdc40..75beb6e29599c 100644 --- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java @@ -111,16 +111,16 @@ * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any * documents that have not changed. The index has the following fields: - * + *
<p>
                      * +------------------------------+-----------------------------+----------------------------------------------+ * | "type" (string field) | "index_uuid" (string field) | "data" (stored binary field in SMILE format) | * +------------------------------+-----------------------------+----------------------------------------------+ * | GLOBAL_TYPE_NAME == "global" | (omitted) | Global metadata | * | INDEX_TYPE_NAME == "index" | Index UUID | Index metadata | * +------------------------------+-----------------------------+----------------------------------------------+ - * + *
<p>
                      * Additionally each commit has the following user data: - * + *
<p>
                      * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ * | Key symbol | Key literal | Value | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ @@ -129,7 +129,7 @@ * | NODE_ID_KEY | "node_id" | The (persistent) ID of the node that wrote this metadata | * | NODE_VERSION_KEY | "node_version" | The (ID of the) version of the node that wrote this metadata | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ - * + *
<p>
                      * (the last-accepted term is recorded in Metadata → CoordinationMetadata so does not need repeating here) * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index 4dc9396751fc9..2807be00feeaa 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -69,7 +69,7 @@ * that holds a copy of the shard. The shard metadata from each node is compared against the * set of valid allocation IDs and for all valid shard copies (if any), the primary shard allocator * executes the allocation deciders to chose a copy to assign the primary shard to. - * + *
<p>
                      * Note that the PrimaryShardAllocator does *not* allocate primaries on index creation * (see {@link org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator}), * nor does it allocate primaries when a primary shard failed and there is a valid replica @@ -386,11 +386,11 @@ protected static NodeShardsResult buildNodeShardsResult( } } - /** - * Orders the active shards copies based on below comparators - * 1. No store exception i.e. shard copy is readable - * 2. Prefer previous primary shard - * 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. + /* + Orders the active shards copies based on below comparators + 1. No store exception i.e. shard copy is readable + 2. Prefer previous primary shard + 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. */ final Comparator comparator; // allocation preference if (matchAnyShard) { diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index a6c9319b6dc54..f530052c5bcd1 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -295,7 +295,7 @@ public AllocateUnassignedDecision makeAllocationDecision( /** * Determines if the shard can be allocated on at least one node based on the allocation deciders. - * + *
<p>
                      * Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one * node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 40b16f3d6323b..4725f40076ce2 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -33,6 +33,9 @@ */ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { + public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest. + public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file. + private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); private static final ParseField CLUSTER_UUID_FIELD = new ParseField("cluster_uuid"); @@ -40,6 +43,8 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField OPENSEARCH_VERSION_FIELD = new ParseField("opensearch_version"); private static final ParseField NODE_ID_FIELD = new ParseField("node_id"); private static final ParseField COMMITTED_FIELD = new ParseField("committed"); + private static final ParseField CODEC_VERSION_FIELD = new ParseField("codec_version"); + private static final ParseField GLOBAL_METADATA_FIELD = new ParseField("global_metadata"); private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); private static final ParseField CLUSTER_UUID_COMMITTED = new ParseField("cluster_uuid_committed"); @@ -84,7 +89,33 @@ private static boolean clusterUUIDCommitted(Object[] fields) { return (boolean) fields[9]; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + private static int codecVersion(Object[] fields) { + return (int) fields[10]; + } + + private static String globalMetadataFileName(Object[] fields) { + return (String) fields[11]; + } + + private static final ConstructingObjectParser PARSER_V0 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> new ClusterMetadataManifest( + term(fields), + version(fields), + clusterUUID(fields), + stateUUID(fields), + opensearchVersion(fields), + nodeId(fields), + committed(fields), + CODEC_V0, + null, + indices(fields), + previousClusterUUID(fields), + clusterUUIDCommitted(fields) + ) + ); + + private static final ConstructingObjectParser PARSER_V1 = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> new ClusterMetadataManifest( term(fields), @@ -94,29 +125,45 @@ private static boolean clusterUUIDCommitted(Object[] fields) { opensearchVersion(fields), nodeId(fields), committed(fields), + codecVersion(fields), + globalMetadataFileName(fields), indices(fields), previousClusterUUID(fields), clusterUUIDCommitted(fields) ) ); + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V1; + static { - PARSER.declareLong(ConstructingObjectParser.constructorArg(), 
CLUSTER_TERM_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), STATE_VERSION_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), STATE_UUID_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), OPENSEARCH_VERSION_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), COMMITTED_FIELD); - PARSER.declareObjectArray( + declareParser(PARSER_V0, CODEC_V0); + declareParser(PARSER_V1, CODEC_V1); + } + + private static void declareParser(ConstructingObjectParser parser, long codec_version) { + parser.declareLong(ConstructingObjectParser.constructorArg(), CLUSTER_TERM_FIELD); + parser.declareLong(ConstructingObjectParser.constructorArg(), STATE_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), STATE_UUID_FIELD); + parser.declareInt(ConstructingObjectParser.constructorArg(), OPENSEARCH_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), COMMITTED_FIELD); + parser.declareObjectArray( ConstructingObjectParser.constructorArg(), (p, c) -> UploadedIndexMetadata.fromXContent(p), INDICES_FIELD ); - PARSER.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); + parser.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); + + if (codec_version >= CODEC_V1) { + parser.declareInt(ConstructingObjectParser.constructorArg(), CODEC_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), GLOBAL_METADATA_FIELD); + } } + private final int codecVersion; + private final String globalMetadataFileName; private final List indices; private final long clusterTerm; private final long stateVersion; @@ -168,6 +215,14 @@ public boolean isClusterUUIDCommitted() { return clusterUUIDCommitted; } + public int getCodecVersion() { + return codecVersion; + } + + public String getGlobalMetadataFileName() { + return globalMetadataFileName; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -176,6 +231,8 @@ public ClusterMetadataManifest( Version opensearchVersion, String nodeId, boolean committed, + int codecVersion, + String globalMetadataFileName, List indices, String previousClusterUUID, boolean clusterUUIDCommitted @@ -187,6 +244,8 @@ public ClusterMetadataManifest( this.opensearchVersion = opensearchVersion; this.nodeId = nodeId; this.committed = committed; + this.codecVersion = codecVersion; + this.globalMetadataFileName = globalMetadataFileName; this.indices = Collections.unmodifiableList(indices); this.previousClusterUUID = previousClusterUUID; this.clusterUUIDCommitted = clusterUUIDCommitted; @@ -203,6 +262,13 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); this.previousClusterUUID = in.readString(); this.clusterUUIDCommitted = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.codecVersion = in.readInt(); + this.globalMetadataFileName = 
in.readString(); + } else { + this.codecVersion = CODEC_V0; // Default codec + this.globalMetadataFileName = null; + } } public static Builder builder() { @@ -231,6 +297,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); builder.field(CLUSTER_UUID_COMMITTED.getPreferredName(), isClusterUUIDCommitted()); + if (onOrAfterCodecVersion(CODEC_V1)) { + builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); + builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); + } return builder; } @@ -246,6 +316,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indices); out.writeString(previousClusterUUID); out.writeBoolean(clusterUUIDCommitted); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeInt(codecVersion); + out.writeString(globalMetadataFileName); + } } @Override @@ -266,12 +340,16 @@ public boolean equals(Object o) { && Objects.equals(nodeId, that.nodeId) && Objects.equals(committed, that.committed) && Objects.equals(previousClusterUUID, that.previousClusterUUID) - && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted); + && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted) + && Objects.equals(globalMetadataFileName, that.globalMetadataFileName) + && Objects.equals(codecVersion, that.codecVersion); } @Override public int hashCode() { return Objects.hash( + codecVersion, + globalMetadataFileName, indices, clusterTerm, stateVersion, @@ -290,8 +368,16 @@ public String toString() { return Strings.toString(MediaTypeRegistry.JSON, this); } + public boolean onOrAfterCodecVersion(int codecVersion) { + return this.codecVersion >= codecVersion; + } + + public static ClusterMetadataManifest fromXContentV0(XContentParser parser) throws IOException { + return PARSER_V0.parse(parser, null); + } + public static ClusterMetadataManifest fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); + return CURRENT_PARSER.parse(parser, null); } /** @@ -301,6 +387,8 @@ public static ClusterMetadataManifest fromXContent(XContentParser parser) throws */ public static class Builder { + private String globalMetadataFileName; + private int codecVersion; private List indices; private long clusterTerm; private long stateVersion; @@ -317,6 +405,16 @@ public Builder indices(List indices) { return this; } + public Builder codecVersion(int codecVersion) { + this.codecVersion = codecVersion; + return this; + } + + public Builder globalMetadataFileName(String globalMetadataFileName) { + this.globalMetadataFileName = globalMetadataFileName; + return this; + } + public Builder clusterTerm(long clusterTerm) { this.clusterTerm = clusterTerm; return this; @@ -378,6 +476,8 @@ public Builder(ClusterMetadataManifest manifest) { this.opensearchVersion = manifest.opensearchVersion; this.nodeId = manifest.nodeId; this.committed = manifest.committed; + this.globalMetadataFileName = manifest.globalMetadataFileName; + this.codecVersion = manifest.codecVersion; this.indices = new ArrayList<>(manifest.indices); this.previousClusterUUID = manifest.previousClusterUUID; this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; @@ -392,6 +492,8 @@ public ClusterMetadataManifest build() { opensearchVersion, nodeId, committed, + codecVersion, + globalMetadataFileName, indices, previousClusterUUID, clusterUUIDCommitted diff --git 
a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index dddc5376803a5..b3309b1fd8a63 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -15,6 +15,7 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; @@ -27,6 +28,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -55,6 +57,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -80,7 +83,23 @@ public class RemoteClusterStateService implements Closeable { private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); - public static final int INDEX_METADATA_UPLOAD_WAIT_MILLIS = 20000; + public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.index_metadata.upload_timeout", + INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.global_metadata.upload_timeout", + GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", @@ -88,11 +107,27 @@ public class RemoteClusterStateService implements Closeable { IndexMetadata::fromXContent ); + public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "metadata", + METADATA_NAME_FORMAT, + Metadata::fromXContent + ); + + /** + * Manifest format compatible with older codec v0, where codec version was missing. + */ + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V0 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); + + /** + * Manifest format compatible with codec v1, where we introduced codec versions/global metadata. 
+ */ public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( "cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContent ); + /** * Used to specify if cluster state metadata should be published to remote store */ @@ -103,11 +138,13 @@ public class RemoteClusterStateService implements Closeable { Property.Final ); - private static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; - private static final String INDEX_PATH_TOKEN = "index"; - private static final String MANIFEST_PATH_TOKEN = "manifest"; - private static final String MANIFEST_FILE_PREFIX = "manifest"; - private static final String INDEX_METADATA_FILE_PREFIX = "metadata"; + public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; + public static final String INDEX_PATH_TOKEN = "index"; + public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; + public static final String MANIFEST_PATH_TOKEN = "manifest"; + public static final String MANIFEST_FILE_PREFIX = "manifest"; + public static final String METADATA_FILE_PREFIX = "metadata"; + public static final int SPLITED_MANIFEST_FILE_LENGTH = 6; // file name manifest__term__version__C/P__timestamp__codecversion private final String nodeId; private final Supplier repositoriesService; @@ -118,7 +155,23 @@ public class RemoteClusterStateService implements Closeable { private BlobStoreTransferService blobStoreTransferService; private volatile TimeValue slowWriteLoggingThreshold; + private volatile TimeValue indexMetadataUploadTimeout; + private volatile TimeValue globalMetadataUploadTimeout; + private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); + private final RemotePersistenceStats remoteStateStats; + public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V1; + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + + // ToXContent Params with gateway mode. + // We are using gateway context mode to persist all custom metadata. + public static final ToXContent.Params FORMAT_PARAMS; + static { + Map params = new HashMap<>(1); + params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); + FORMAT_PARAMS = new ToXContent.MapParams(params); + } public RemoteClusterStateService( String nodeId, @@ -135,7 +188,12 @@ public RemoteClusterStateService( this.relativeTimeNanosSupplier = relativeTimeNanosSupplier; this.threadpool = threadPool; this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); + this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); + this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); + clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); + clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); + this.remoteStateStats = new RemotePersistenceStats(); } private BlobStoreTransferService getBlobStoreTransferService() { @@ -159,13 +217,25 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri return null; } + // TODO: we can upload global metadata and index metadata in parallel. 
[issue: #10645] + // Write globalMetadata + String globalMetadataFile = writeGlobalMetadata(clusterState); + // any validations before/after upload ? final List allUploadedIndexMetadata = writeIndexMetadataParallel( clusterState, new ArrayList<>(clusterState.metadata().indices().values()) ); - final ClusterMetadataManifest manifest = uploadManifest(clusterState, allUploadedIndexMetadata, previousClusterUUID, false); + final ClusterMetadataManifest manifest = uploadManifest( + clusterState, + allUploadedIndexMetadata, + previousClusterUUID, + globalMetadataFile, + false + ); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + "wrote full state with [{}] indices", @@ -174,9 +244,8 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri allUploadedIndexMetadata.size() ); } else { - // todo change to debug logger.info( - "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices", + "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices and global metadata", durationMillis, allUploadedIndexMetadata.size() ); @@ -203,6 +272,23 @@ public ClusterMetadataManifest writeIncrementalMetadata( return null; } assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); + + // Write Global Metadata + final boolean updateGlobalMetadata = Metadata.isGlobalStateEquals( + previousClusterState.metadata(), + clusterState.metadata() + ) == false; + String globalMetadataFile; + // For migration case from codec V0 to V1, we have added null check on global metadata file, + // If file is empty and codec is 1 then write global metadata. 
+ if (updateGlobalMetadata || previousManifest.getGlobalMetadataFileName() == null) { + globalMetadataFile = writeGlobalMetadata(clusterState); + } else { + logger.debug("Global metadata has not updated in cluster state, skipping upload of it"); + globalMetadataFile = previousManifest.getGlobalMetadataFileName(); + } + + // Write Index Metadata final Map previousStateIndexMetadataVersionByName = new HashMap<>(); for (final IndexMetadata indexMetadata : previousClusterState.metadata().indices().values()) { previousStateIndexMetadataVersionByName.put(indexMetadata.getIndex().getName(), indexMetadata.getVersion()); @@ -219,7 +305,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( for (final IndexMetadata indexMetadata : clusterState.metadata().indices().values()) { final Long previousVersion = previousStateIndexMetadataVersionByName.get(indexMetadata.getIndex().getName()); if (previousVersion == null || indexMetadata.getVersion() != previousVersion) { - logger.trace( + logger.debug( "updating metadata for [{}], changing version from [{}] to [{}]", indexMetadata.getIndex(), previousVersion, @@ -245,31 +331,95 @@ public ClusterMetadataManifest writeIncrementalMetadata( clusterState, new ArrayList<>(allUploadedIndexMetadata.values()), previousManifest.getPreviousClusterUUID(), + globalMetadataFile, false ); deleteStaleClusterMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), RETAINED_MANIFESTS); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " - + "wrote metadata for [{}] indices and skipped [{}] unchanged indices", + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated : [{}]", durationMillis, slowWriteLoggingThreshold, numIndicesUpdated, - numIndicesUnchanged + numIndicesUnchanged, + updateGlobalMetadata ); } else { - logger.trace( - "writing cluster state took [{}ms]; " + "wrote metadata for [{}] indices and skipped [{}] unchanged indices", + logger.info( + "writing cluster state for version [{}] took [{}ms]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated : [{}]", + manifest.getStateVersion(), durationMillis, numIndicesUpdated, - numIndicesUnchanged + numIndicesUnchanged, + updateGlobalMetadata ); } return manifest; } + /** + * Uploads provided ClusterState's global Metadata to remote store in parallel. + * The call is blocking so the method waits for upload to finish and then return. + * + * @param clusterState current ClusterState + * @return String file name where globalMetadata file is stored. 
+ */ + private String writeGlobalMetadata(ClusterState clusterState) throws IOException { + + AtomicReference result = new AtomicReference(); + AtomicReference exceptionReference = new AtomicReference(); + + final BlobContainer globalMetadataContainer = globalMetadataContainer( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + final String globalMetadataFilename = globalMetadataFileName(clusterState.metadata()); + + // latch to wait until upload is not finished + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { + logger.trace(String.format(Locale.ROOT, "GlobalMetadata uploaded successfully.")); + result.set(globalMetadataContainer.path().buildAsString() + globalMetadataFilename); + }, ex -> { exceptionReference.set(ex); }), latch); + + GLOBAL_METADATA_FORMAT.writeAsyncWithUrgentPriority( + clusterState.metadata(), + globalMetadataContainer, + globalMetadataFilename, + blobStoreRepository.getCompressor(), + completionListener, + FORMAT_PARAMS + ); + + try { + if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + // TODO: We should add metrics where transfer is timing out. [Issue: #10687] + GlobalMetadataTransferException ex = new GlobalMetadataTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete") + ); + throw ex; + } + } catch (InterruptedException ex) { + GlobalMetadataTransferException exception = new GlobalMetadataTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete - %s"), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionReference.get() != null) { + throw new GlobalMetadataTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + } + return result.get(); + } + /** * Uploads provided IndexMetadata's to remote store in parallel. The call is blocking so the method waits for upload to finish and then return. * @@ -306,7 +456,7 @@ private List writeIndexMetadataParallel(ClusterState clus } try { - if (latch.await(INDEX_METADATA_UPLOAD_WAIT_MILLIS, TimeUnit.MILLISECONDS) == false) { + if (latch.await(getIndexMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { IndexMetadataTransferException ex = new IndexMetadataTransferException( String.format( Locale.ROOT, @@ -373,25 +523,34 @@ private void writeIndexMetadataAsync( ex -> latchedActionListener.onFailure(new IndexMetadataTransferException(indexMetadata.getIndex().toString(), ex)) ); - INDEX_METADATA_FORMAT.writeAsync( + INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( indexMetadata, indexMetadataContainer, indexMetadataFilename, blobStoreRepository.getCompressor(), - completionListener + completionListener, + FORMAT_PARAMS ); } @Nullable public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) throws IOException { + assert clusterState != null : "Last accepted cluster state is not set"; if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. 
Exiting"); return null; } - assert clusterState != null : "Last accepted cluster state is not set"; assert previousManifest != null : "Last cluster metadata manifest is not set"; - return uploadManifest(clusterState, previousManifest.getIndices(), previousManifest.getPreviousClusterUUID(), true); + ClusterMetadataManifest committedManifest = uploadManifest( + clusterState, + previousManifest.getIndices(), + previousManifest.getPreviousClusterUUID(), + previousManifest.getGlobalMetadataFileName(), + true + ); + deleteStaleClusterUUIDs(clusterState, committedManifest); + return committedManifest; } @Override @@ -416,10 +575,11 @@ private ClusterMetadataManifest uploadManifest( ClusterState clusterState, List uploadedIndexMetadata, String previousClusterUUID, + String globalClusterMetadataFileName, boolean committed ) throws IOException { synchronized (this) { - final String manifestFileName = getManifestFileName(clusterState.term(), clusterState.version()); + final String manifestFileName = getManifestFileName(clusterState.term(), clusterState.version(), committed); final ClusterMetadataManifest manifest = new ClusterMetadataManifest( clusterState.term(), clusterState.getVersion(), @@ -428,6 +588,8 @@ private ClusterMetadataManifest uploadManifest( Version.CURRENT, nodeId, committed, + MANIFEST_CURRENT_CODEC_VERSION, + globalClusterMetadataFileName, uploadedIndexMetadata, previousClusterUUID, clusterState.metadata().clusterUUIDCommitted() @@ -440,7 +602,18 @@ private ClusterMetadataManifest uploadManifest( private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) throws IOException { final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); - CLUSTER_METADATA_MANIFEST_FORMAT.write(uploadManifest, metadataManifestContainer, fileName, blobStoreRepository.getCompressor()); + CLUSTER_METADATA_MANIFEST_FORMAT.write( + uploadManifest, + metadataManifestContainer, + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + logger.debug( + "Metadata manifest file [{}] written during [{}] phase. ", + fileName, + uploadManifest.isCommitted() ? 
"commit" : "publish" + ); } private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { @@ -459,6 +632,12 @@ private BlobContainer indexMetadataContainer(String clusterName, String clusterU .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID)); } + private BlobContainer globalMetadataContainer(String clusterName, String clusterUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/ + return blobStoreRepository.blobStore() + .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(GLOBAL_METADATA_PATH_TOKEN)); + } + private BlobContainer manifestContainer(String clusterName, String clusterUUID) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); @@ -481,22 +660,57 @@ private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; } - private static String getManifestFileName(long term, long version) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637_456536447 - return String.join(DELIMITER, getManifestFileNamePrefix(term, version), RemoteStoreUtils.invertLong(System.currentTimeMillis())); + private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { + this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + } + + private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) { + this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; + } + + public TimeValue getIndexMetadataUploadTimeout() { + return this.indexMetadataUploadTimeout; } - private static String getManifestFileNamePrefix(long term, long version) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637 - return String.join(DELIMITER, MANIFEST_PATH_TOKEN, RemoteStoreUtils.invertLong(term), RemoteStoreUtils.invertLong(version)); + public TimeValue getGlobalMetadataUploadTimeout() { + return this.globalMetadataUploadTimeout; + } + + static String getManifestFileName(long term, long version, boolean committed) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ + return String.join( + DELIMITER, + MANIFEST_PATH_TOKEN, + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + (committed ? "C" : "P"), // C for committed and P for published + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(MANIFEST_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during read we reads last place to + // determine codec version. + ); + } + + static String indexMetadataFileName(IndexMetadata indexMetadata) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index//metadata______ + return String.join( + DELIMITER, + METADATA_FILE_PREFIX, + RemoteStoreUtils.invertLong(indexMetadata.getVersion()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during read we reads last + // place to determine codec version. 
+ ); } - private static String indexMetadataFileName(IndexMetadata indexMetadata) { + private static String globalMetadataFileName(Metadata metadata) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/metadata______ return String.join( DELIMITER, - INDEX_METADATA_FILE_PREFIX, - String.valueOf(indexMetadata.getVersion()), - String.valueOf(System.currentTimeMillis()) + METADATA_FILE_PREFIX, + RemoteStoreUtils.invertLong(metadata.version()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) ); } @@ -509,18 +723,18 @@ private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster + * @param clusterMetadataManifest manifest file of cluster * @return {@code Map} latest IndexUUID to IndexMetadata map */ - public Map getLatestIndexMetadata(String clusterName, String clusterUUID) throws IOException { - start(); - Map remoteIndexMetadata = new HashMap<>(); - Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - if (!clusterMetadataManifest.isPresent()) { - throw new IllegalStateException("Latest index metadata is not present for the provided clusterUUID"); - } - assert Objects.equals(clusterUUID, clusterMetadataManifest.get().getClusterUUID()) + private Map getIndexMetadataMap( + String clusterName, + String clusterUUID, + ClusterMetadataManifest clusterMetadataManifest + ) { + assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; - for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.get().getIndices()) { + Map remoteIndexMetadata = new HashMap<>(); + for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); } @@ -536,10 +750,11 @@ public Map getLatestIndexMetadata(String clusterName, Str * @return {@link IndexMetadata} */ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { + BlobContainer blobContainer = indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()); try { String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); return INDEX_METADATA_FORMAT.read( - indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()), + blobContainer, splitPath[splitPath.length - 1], blobStoreRepository.getNamedXContentRegistry() ); @@ -551,6 +766,55 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U } } + /** + * Fetch latest metadata from remote cluster state including global metadata and index metadata + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return {@link IndexMetadata} + */ + public Metadata getLatestMetadata(String clusterName, String clusterUUID) { + start(); + Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!clusterMetadataManifest.isPresent()) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) + ); + } + // Fetch 
Global Metadata + Metadata globalMetadata = getGlobalMetadata(clusterName, clusterUUID, clusterMetadataManifest.get()); + + // Fetch Index Metadata + Map indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); + + Map indexMetadataMap = new HashMap<>(); + indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); + + return Metadata.builder(globalMetadata).indices(indexMetadataMap).build(); + } + + private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { + String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); + try { + // Fetch Global metadata + if (globalMetadataFileName != null) { + String[] splitPath = globalMetadataFileName.split("/"); + return GLOBAL_METADATA_FORMAT.read( + globalMetadataContainer(clusterName, clusterUUID), + splitPath[splitPath.length - 1], + blobStoreRepository.getNamedXContentRegistry() + ); + } else { + return Metadata.EMPTY_METADATA; + } + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), + e + ); + } + } + /** * Fetch latest ClusterMetadataManifest from remote state store * @@ -560,10 +824,7 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U */ public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); - if (latestManifestFileName.isPresent()) { - return Optional.of(fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, latestManifestFileName.get())); - } - return Optional.empty(); + return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); } /** @@ -583,7 +844,8 @@ public String getLastKnownUUIDFromRemote(String clusterName) { return validChain.get(0); } catch (IOException e) { throw new IllegalStateException( - String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName) + String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName), + e ); } } @@ -604,7 +866,8 @@ private Map getLatestManifestForAllClusterUUIDs manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); } catch (Exception e) { throw new IllegalStateException( - String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID) + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e ); } } @@ -618,25 +881,31 @@ private Map getLatestManifestForAllClusterUUIDs * @return List of cluster UUIDs. 
The first element is the most recent cluster UUID in the chain */ private List createClusterChain(final Map manifestsByClusterUUID, final String clusterName) { - final Map clusterUUIDGraph = manifestsByClusterUUID.values() + final List validClusterManifests = manifestsByClusterUUID.values() .stream() + .filter(this::isValidClusterUUID) + .collect(Collectors.toList()); + final Map clusterUUIDGraph = validClusterManifests.stream() .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); - final List validClusterUUIDs = manifestsByClusterUUID.values() - .stream() - .filter(m -> !isInvalidClusterUUID(m) && !clusterUUIDGraph.containsValue(m.getClusterUUID())) + final List topLevelClusterUUIDs = validClusterManifests.stream() .map(ClusterMetadataManifest::getClusterUUID) + .filter(clusterUUID -> !clusterUUIDGraph.containsValue(clusterUUID)) .collect(Collectors.toList()); - if (validClusterUUIDs.isEmpty()) { - logger.info("There is no valid previous cluster UUID"); + + if (topLevelClusterUUIDs.isEmpty()) { + // This can occur only when there are no valid cluster UUIDs + assert validClusterManifests.isEmpty() : "There are no top level cluster UUIDs even when there are valid cluster UUIDs"; + logger.info("There is no valid previous cluster UUID. All cluster UUIDs evaluated are: {}", manifestsByClusterUUID.keySet()); return Collections.emptyList(); } - if (validClusterUUIDs.size() > 1) { + if (topLevelClusterUUIDs.size() > 1) { + logger.info("Top level cluster UUIDs: {}", topLevelClusterUUIDs); // If the valid cluster UUIDs are more that 1, it means there was some race condition where // more then 2 cluster manager nodes tried to become active cluster manager and published // 2 cluster UUIDs which followed the same previous UUID. final Map manifestsByClusterUUIDTrimmed = trimClusterUUIDs( manifestsByClusterUUID, - validClusterUUIDs, + topLevelClusterUUIDs, clusterName ); if (manifestsByClusterUUID.size() == manifestsByClusterUUIDTrimmed.size()) { @@ -645,19 +914,20 @@ private List createClusterChain(final Map validChain = new ArrayList<>(); - String currentUUID = validClusterUUIDs.get(0); + String currentUUID = topLevelClusterUUIDs.get(0); while (currentUUID != null && !ClusterState.UNKNOWN_UUID.equals(currentUUID)) { validChain.add(currentUUID); // Getting the previous cluster UUID of a cluster UUID from the clusterUUID Graph currentUUID = clusterUUIDGraph.get(currentUUID); } + logger.info("Known UUIDs found in remote store : [{}]", validChain); return validChain; } @@ -678,13 +948,10 @@ private Map trimClusterUUIDs( // Here we compare the manifest of current UUID to that of previous UUID // In case currentUUID's latest manifest is same as previous UUIDs latest manifest, // that means it was restored from previousUUID and no IndexMetadata update was performed on it. 
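// In that case the cluster UUID can be dropped ("trimmed") from the chain, since its metadata is
// fully recoverable from the previous UUID. Note that the check below now also compares global
// metadata, so a UUID is trimmed only when both its index metadata and its global metadata match
// the previous UUID's manifest.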
- if (ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { - if (currentManifest.getIndices().isEmpty()) { - trimmedUUIDs.remove(clusterUUID); - } - } else { + if (!ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); - if (isMetadataEqual(currentManifest, previousManifest, clusterName)) { + if (isMetadataEqual(currentManifest, previousManifest, clusterName) + && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { trimmedUUIDs.remove(clusterUUID); } } @@ -714,35 +981,54 @@ private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataMa return true; } - private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { - return !manifest.isClusterUUIDCommitted(); + private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + Metadata secondGlobalMetadata = getGlobalMetadata(clusterName, second.getClusterUUID(), second); + Metadata firstGlobalMetadata = getGlobalMetadata(clusterName, first.getClusterUUID(), first); + return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); + } + + private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { + return manifest.isClusterUUIDCommitted(); } /** - * Fetch latest ClusterMetadataManifest file from remote state store + * Fetch ClusterMetadataManifest files from remote state store in order * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster - * @return latest ClusterMetadataManifest filename + * @param limit max no of files to fetch + * @return all manifest file names */ - private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + private List getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { try { - /** - * {@link BlobContainer#listBlobsByPrefixInSortedOrder} will get the latest manifest file - * as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures - * when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. + + /* + {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first + as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures + when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. 
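This assumes RemoteStoreUtils.invertLong emits a fixed-width, zero-padded value of (Long.MAX_VALUE - input):
for example invertLong(2) = 9223372036854775805 sorts before invertLong(1) = 9223372036854775806, so the
manifest with the later timestamp always lists first.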
*/ - List manifestFilesMetadata = manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( + return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( MANIFEST_FILE_PREFIX + DELIMITER, - 1, + limit, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC ); - if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { - return Optional.of(manifestFilesMetadata.get(0).name()); - } } catch (IOException e) { throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); } + } + + /** + * Fetch latest ClusterMetadataManifest file from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return latest ClusterMetadataManifest filename + */ + private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + List manifestFilesMetadata = getManifestFileNames(clusterName, clusterUUID, 1); + if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { + return Optional.of(manifestFilesMetadata.get(0).name()); + } logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); return Optional.empty(); } @@ -757,7 +1043,7 @@ private Optional getLatestManifestFileName(String clusterName, String cl private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) throws IllegalStateException { try { - return RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.read( + return getClusterMetadataManifestBlobStoreFormat(filename).read( manifestContainer(clusterName, clusterUUID), filename, blobStoreRepository.getNamedXContentRegistry() @@ -767,10 +1053,37 @@ private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String cluste } } + private ChecksumBlobStoreFormat getClusterMetadataManifestBlobStoreFormat(String fileName) { + long codecVersion = getManifestCodecVersion(fileName); + if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { + return CLUSTER_METADATA_MANIFEST_FORMAT; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V0; + } + + throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); + } + + private int getManifestCodecVersion(String fileName) { + String[] splitName = fileName.split(DELIMITER); + if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { + return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. + } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 + // is used. 
+ return ClusterMetadataManifest.CODEC_V0; + } else { + throw new IllegalArgumentException("Manifest file name is corrupted"); + } + } + public static String encodeString(String content) { return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); } + public void writeMetadataFailed() { + getStats().stateFailed(); + } + /** * Exception for IndexMetadata transfer failures to remote */ @@ -785,13 +1098,27 @@ public IndexMetadataTransferException(String errorDesc, Throwable cause) { } } + /** + * Exception for GlobalMetadata transfer failures to remote + */ + static class GlobalMetadataTransferException extends RuntimeException { + + public GlobalMetadataTransferException(String errorDesc) { + super(errorDesc); + } + + public GlobalMetadataTransferException(String errorDesc, Throwable cause) { + super(errorDesc, cause); + } + } + /** * Purges all remote cluster state against provided cluster UUIDs * * @param clusterName name of the cluster * @param clusterUUIDs clusteUUIDs for which the remote state needs to be purged */ - public void deleteStaleClusterMetadata(String clusterName, List clusterUUIDs) { + void deleteStaleUUIDsClusterMetadata(String clusterName, List clusterUUIDs) { clusterUUIDs.forEach(clusterUUID -> { getBlobStoreTransferService().deleteAsync( ThreadPool.Names.REMOTE_PURGE, @@ -811,6 +1138,7 @@ public void onFailure(Exception e) { ), e ); + remoteStateStats.cleanUpAttemptFailed(); } } ); @@ -824,7 +1152,8 @@ public void onFailure(Exception e) { * @param clusterUUID uuid of cluster state to refer to in remote * @param manifestsToRetain no of latest manifest files to keep in remote */ - private void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { + // package private for testing + void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { if (deleteStaleMetadataRunning.compareAndSet(false, true) == false) { logger.info("Delete stale cluster metadata task is already in progress."); return; @@ -861,8 +1190,9 @@ public void onFailure(Exception e) { } } ); - } finally { + } catch (Exception e) { deleteStaleMetadataRunning.set(false); + throw e; } } @@ -876,6 +1206,7 @@ private void deleteClusterMetadata( Set filesToKeep = new HashSet<>(); Set staleManifestPaths = new HashSet<>(); Set staleIndexMetadataPaths = new HashSet<>(); + Set staleGlobalMetadataPaths = new HashSet<>(); activeManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( clusterName, @@ -884,6 +1215,7 @@ private void deleteClusterMetadata( ); clusterMetadataManifest.getIndices() .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName()); }); staleManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( @@ -892,30 +1224,40 @@ private void deleteClusterMetadata( blobMetadata.name() ); staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name()); + if (filesToKeep.contains(clusterMetadataManifest.getGlobalMetadataFileName()) == false) { + String[] globalMetadataSplitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + staleGlobalMetadataPaths.add( + new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + GLOBAL_METADATA_FORMAT.blobName( + 
globalMetadataSplitPath[globalMetadataSplitPath.length - 1] + ) + ); + } clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { staleIndexMetadataPaths.add( new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString() - + uploadedIndexMetadata.getUploadedFilename() - + ".dat" + + INDEX_METADATA_FORMAT.blobName(uploadedIndexMetadata.getUploadedFilename()) ); } }); }); if (staleManifestPaths.isEmpty()) { - logger.info("No stale Remote Cluster Metadata files found"); + logger.debug("No stale Remote Cluster Metadata files found"); return; } + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleGlobalMetadataPaths)); deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths)); deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths)); } catch (IllegalStateException e) { logger.error("Error while fetching Remote Cluster Metadata manifests", e); } catch (IOException e) { logger.error("Error while deleting stale Remote Cluster Metadata files", e); + remoteStateStats.cleanUpAttemptFailed(); } catch (Exception e) { logger.error("Unexpected error while deleting stale Remote Cluster Metadata files", e); + remoteStateStats.cleanUpAttemptFailed(); } } @@ -923,4 +1265,31 @@ private void deleteStalePaths(String clusterName, String clusterUUID, List { + String clusterName = clusterState.getClusterName().value(); + logger.debug("Deleting stale cluster UUIDs data from remote [{}]", clusterName); + Set allClustersUUIDsInRemote; + try { + allClustersUUIDsInRemote = new HashSet<>(getAllClusterUUIDs(clusterState.getClusterName().value())); + } catch (IOException e) { + logger.info(String.format(Locale.ROOT, "Error while fetching all cluster UUIDs for [%s]", clusterName)); + return; + } + // Retain last 2 cluster uuids data + allClustersUUIDsInRemote.remove(committedManifest.getClusterUUID()); + allClustersUUIDsInRemote.remove(committedManifest.getPreviousClusterUUID()); + deleteStaleUUIDsClusterMetadata(clusterName, new ArrayList<>(allClustersUUIDsInRemote)); + }); + } + + public RemotePersistenceStats getStats() { + return remoteStateStats; + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java new file mode 100644 index 0000000000000..f2330846fa23e --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.cluster.coordination.PersistedStateStats; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Remote state related extended stats. 
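 * In addition to the base upload stats (success/failure counts and upload time, presumably maintained by
 * {@link PersistedStateStats}), this class adds a cleanup_attempt_failed_count that is incremented whenever
 * a stale remote metadata cleanup attempt fails.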
+ * + * @opensearch.internal + */ +public class RemotePersistenceStats extends PersistedStateStats { + static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count"; + static final String REMOTE_UPLOAD = "remote_upload"; + private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0); + + public RemotePersistenceStats() { + super(REMOTE_UPLOAD); + addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount); + } + + public void cleanUpAttemptFailed() { + cleanupAttemptFailedCount.incrementAndGet(); + } + + public long getCleanupAttemptFailedCount() { + return cleanupAttemptFailedCount.get(); + } +} diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index ed44102d0abe4..b8f8abb6c2c23 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -298,6 +298,7 @@ static int resolvePublishPort(Settings settings, List boundAdd } public void onException(HttpChannel channel, Exception e) { + channel.handleException(e); if (lifecycle.started() == false) { // just close and ignore - we are already stopped and just need to make sure we release all resources CloseableChannel.closeChannel(channel); diff --git a/server/src/main/java/org/opensearch/http/CorsHandler.java b/server/src/main/java/org/opensearch/http/CorsHandler.java index 862c50ae6ac1f..464ced184d10e 100644 --- a/server/src/main/java/org/opensearch/http/CorsHandler.java +++ b/server/src/main/java/org/opensearch/http/CorsHandler.java @@ -81,7 +81,7 @@ * This file is forked from the https://netty.io project. In particular it combines the following three * files: io.netty.handler.codec.http.cors.CorsHandler, io.netty.handler.codec.http.cors.CorsConfig, and * io.netty.handler.codec.http.cors.CorsConfigBuilder. - * + *
                      * It modifies the original netty code to operate on OpenSearch http request/response abstractions. * Additionally, it removes CORS features that are not used by OpenSearch. * diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index 99aaed23c69b8..679a5d73c7837 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -36,6 +36,7 @@ import org.opensearch.core.action.ActionListener; import java.net.InetSocketAddress; +import java.util.Optional; /** * Represents an HTTP comms channel @@ -43,6 +44,11 @@ * @opensearch.internal */ public interface HttpChannel extends CloseableChannel { + /** + * Notify HTTP channel that exception happens and the response may not be sent (for example, timeout) + * @param ex the exception being raised + */ + default void handleException(Exception ex) {} /** * Sends an http response to the channel. The listener will be executed once the send process has been @@ -67,4 +73,17 @@ public interface HttpChannel extends CloseableChannel { */ InetSocketAddress getRemoteAddress(); + /** + * Returns the contextual property associated with this specific HTTP channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default Optional get(String name, Class clazz) { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java index c2367383df595..090b1f1d025e0 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java @@ -14,7 +14,7 @@ /** * Implementation of identity plugin that does not enforce authentication or authorization - * + *
                      * This class and related classes in this package will not return nulls or fail access checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java index 424a10204aa19..964a218db3cf5 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java @@ -17,7 +17,7 @@ /** * Implementation of subject that is always authenticated - * + *
                      * This class and related classes in this package will not return nulls or fail permissions checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java index 1dc3a58916b5c..fa6643b7447dc 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java @@ -50,9 +50,4 @@ public String asAuthHeaderValue() { } }; } - - @Override - public Subject authenticateToken(AuthToken authToken) { - return null; - } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java b/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java index c929e7421b3d8..88bb855a6e70d 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java +++ b/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java @@ -16,4 +16,5 @@ public interface AuthToken { String asAuthHeaderValue(); + } diff --git a/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java b/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java index 3fef248ee6d3a..00e50a59e9486 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java +++ b/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java @@ -14,46 +14,17 @@ public class OnBehalfOfClaims { private final String audience; - private final String subject; - private final Long expiration; - private final Long not_before; - private final Long issued_at; + private final Long expiration_seconds; /** * Constructor for OnBehalfOfClaims * @param aud the Audience for the token - * @param subject the subject of the token - * @param expiration the expiration time in seconds for the token - * @param not_before the not_before time in seconds for the token - * @param issued_at the issued_at time in seconds for the token - */ - public OnBehalfOfClaims(String aud, String subject, Long expiration, Long not_before, Long issued_at) { - this.audience = aud; - this.subject = subject; - this.expiration = expiration; - this.not_before = not_before; - this.issued_at = issued_at; - } - - /** - * A constructor that sets a default issued at time of the current time - * @param aud the Audience for the token - * @param subject the subject of the token - * @param expiration the expiration time in seconds for the token - * @param not_before the not_before time in seconds for the token - */ - public OnBehalfOfClaims(String aud, String subject, Long expiration, Long not_before) { - this(aud, subject, expiration, not_before, System.currentTimeMillis() / 1000); - } + * @param expiration_seconds the length of time in seconds the token is valid - /** - * A constructor which sets a default not before time of the current time - * @param aud the Audience for the token - * @param subject the subject of the token - * @param expiration the expiration time in seconds for the token */ - public OnBehalfOfClaims(String aud, String subject, Long expiration) { - this(aud, subject, expiration, System.currentTimeMillis() / 1000); + public OnBehalfOfClaims(String aud, Long expiration_seconds) { + this.audience = aud; + this.expiration_seconds = expiration_seconds; } /** @@ -62,26 +33,14 @@ public OnBehalfOfClaims(String aud, String subject, Long expiration) { * @param subject the subject of the token */ public OnBehalfOfClaims(String aud, String subject) { - this(aud, 
subject, System.currentTimeMillis() / 1000 + 300); + this(aud, 300L); } public String getAudience() { return audience; } - public String getSubject() { - return subject; - } - public Long getExpiration() { - return expiration; - } - - public Long getNot_before() { - return not_before; - } - - public Long getIssued_at() { - return issued_at; + return expiration_seconds; } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java index 193966001f44c..4bd3ebdded588 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java +++ b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java @@ -26,7 +26,7 @@ public class RestTokenExtractor { /** * Given a rest request it will extract authentication token - * + *
                      * If no token was found, returns null. */ public static AuthToken extractToken(final RestRequest request) { diff --git a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java index b4048251a06a2..972a9a1080955 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java +++ b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java @@ -30,11 +30,4 @@ public interface TokenManager { * @return a new auth token */ public AuthToken issueServiceAccountToken(final String audience); - - /** - * Authenticates a provided authToken - * @param authToken: The authToken to authenticate - * @return The authenticated subject - */ - public Subject authenticateToken(AuthToken authToken); } diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 8692876412ea9..e29283724ebf8 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -81,6 +81,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; @@ -602,7 +603,8 @@ public IndexService newIndexService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier + Supplier clusterRemoteTranslogBufferIntervalSupplier, + RecoverySettings recoverySettings ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -660,7 +662,8 @@ public IndexService newIndexService( recoveryStateFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, - clusterRemoteTranslogBufferIntervalSupplier + clusterRemoteTranslogBufferIntervalSupplier, + recoverySettings ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index ca0cc307e460b..84e8e2f41aaf1 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -95,6 +95,7 @@ import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; @@ -178,6 +179,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final BiFunction translogFactorySupplier; private final Supplier clusterDefaultRefreshIntervalSupplier; private final Supplier clusterRemoteTranslogBufferIntervalSupplier; + private final RecoverySettings recoverySettings; public IndexService( IndexSettings indexSettings, @@ -212,7 +214,8 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, 
BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier + Supplier clusterRemoteTranslogBufferIntervalSupplier, + RecoverySettings recoverySettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -289,6 +292,7 @@ public IndexService( this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; + this.recoverySettings = recoverySettings; updateFsyncTaskIfNecessary(); } @@ -519,7 +523,9 @@ public synchronized IndexShard createShard( this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null, remoteStore, remoteStoreStatsTrackerFactory, - clusterRemoteTranslogBufferIntervalSupplier + clusterRemoteTranslogBufferIntervalSupplier, + nodeEnv.nodeId(), + recoverySettings ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); @@ -690,7 +696,7 @@ public IndexSettings getIndexSettings() { /** * Creates a new QueryShardContext. - * + *
                      * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -700,7 +706,7 @@ public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searche /** * Creates a new QueryShardContext. - * + *
                      * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -1283,7 +1289,7 @@ AsyncTranslogFSync getFsyncTask() { // for tests return fsyncTask; } - AsyncTrimTranslogTask getTrimTranslogTask() { // for tests + public AsyncTrimTranslogTask getTrimTranslogTask() { // for tests return trimTranslogTask; } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 1e4224c314f05..00e765d73f77f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -54,6 +54,7 @@ import org.opensearch.node.Node; import org.opensearch.search.pipeline.SearchPipelineService; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -83,9 +84,42 @@ */ @PublicApi(since = "1.0.0") public final class IndexSettings { - private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default"; + private static final String DEFAULT_POLICY = "default"; private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush"; + /** + * Enum representing supported merge policies + */ + public enum IndexMergePolicy { + TIERED("tiered"), + LOG_BYTE_SIZE("log_byte_size"), + DEFAULT_POLICY(IndexSettings.DEFAULT_POLICY); + + private final String value; + + IndexMergePolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static IndexMergePolicy fromString(String text) { + for (IndexMergePolicy policy : IndexMergePolicy.values()) { + if (policy.value.equals(text)) { + return policy; + } + } + throw new IllegalArgumentException( + "The setting has unsupported policy specified: " + + text + + ". Please use one of: " + + String.join(", ", Arrays.stream(IndexMergePolicy.values()).map(IndexMergePolicy::getValue).toArray(String[]::new)) + ); + } + } + public static final Setting> DEFAULT_FIELD_SETTING = Setting.listSetting( "index.query.default_field", Collections.singletonList("*"), @@ -516,7 +550,7 @@ public final class IndexSettings { /** * This setting controls if unreferenced files will be cleaned up in case segment merge fails due to disk full. - * + *
                      * Defaults to true which means unreferenced files will be cleaned up in case segment merge fails. */ public static final Setting INDEX_UNREFERENCED_FILE_CLEANUP = Setting.boolSetting( @@ -532,7 +566,7 @@ public final class IndexSettings { * documents) on the grounds that a file-based peer recovery may copy all of the documents in the shard over to the new peer, but is * significantly faster than replaying the missing operations on the peer, so once a peer falls far enough behind the primary it makes * more sense to copy all the data over again instead of replaying history. - * + *
                      * Defaults to retaining history for up to 10% of the documents in the shard. This can only be changed in tests, since this setting is * intentionally unregistered. */ @@ -566,11 +600,25 @@ public final class IndexSettings { public static final Setting INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( "index.merge_on_flush.policy", - MERGE_ON_FLUSH_DEFAULT_POLICY, + DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); + public static final Setting INDEX_MERGE_POLICY = Setting.simpleString( + "index.merge.policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.IndexScope + ); + + public static final Setting TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( + "indices.time_series_index.default_index_merge_policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.NodeScope + ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( "index.searchable_snapshot.repository", Property.IndexScope, @@ -620,6 +668,14 @@ public final class IndexSettings { Property.IndexScope ); + public static final Setting INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING = Setting.intSetting( + "index.remote_store.translog.keep_extra_gen", + 100, + 0, + Property.Dynamic, + Property.IndexScope + ); + private final Index index; private final Version version; private final Logger logger; @@ -632,6 +688,7 @@ public final class IndexSettings { private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; private final boolean isRemoteSnapshot; + private int remoteTranslogKeepExtraGen; private Version extendedCompatibilitySnapshotVersion; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock @@ -651,7 +708,8 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; - private final MergePolicyConfig mergePolicyConfig; + private final TieredMergePolicyProvider tieredMergePolicyProvider; + private final LogByteSizeMergePolicyProvider logByteSizeMergePolicyProvider; private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -801,6 +859,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); + this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { @@ -844,7 +903,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); - this.mergePolicyConfig = new MergePolicyConfig(logger, this); + this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); + this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); this.indexSortConfig = new 
IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -866,33 +926,59 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); - - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - mergePolicyConfig::setDeletesPctAllowed + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + tieredMergePolicyProvider::setNoCFSRatio ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - mergePolicyConfig::setExpungeDeletesAllowed + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + tieredMergePolicyProvider::setDeletesPctAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - mergePolicyConfig::setFloorSegmentSetting + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + tieredMergePolicyProvider::setExpungeDeletesAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - mergePolicyConfig::setMaxMergesAtOnce + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + tieredMergePolicyProvider::setFloorSegmentSetting ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - mergePolicyConfig::setMaxMergedSegment + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + tieredMergePolicyProvider::setMaxMergesAtOnce ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - mergePolicyConfig::setSegmentsPerTier + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + tieredMergePolicyProvider::setMaxMergedSegment + ); + scopedSettings.addSettingsUpdateConsumer( + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + tieredMergePolicyProvider::setSegmentsPerTier ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + logByteSizeMergePolicyProvider::setLBSMergeFactor + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMinMergedMB + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeSegment + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeMBForForcedMerge + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeDocs + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, + logByteSizeMergePolicyProvider::setLBSNoCFSRatio + ); scopedSettings.addSettingsUpdateConsumer( 
MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -945,9 +1031,13 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setRemoteTranslogUploadBufferInterval ); + scopedSettings.addSettingsUpdateConsumer(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, this::setRemoteTranslogKeepExtraGen); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { + if (this.isRemoteStoreEnabled) { + logger.warn("Search idle is not supported for remote backed indices"); + } if (this.replicationType == ReplicationType.SEGMENT && this.getNumberOfReplicas() > 0) { logger.warn("Search idle is not supported for indices with replicas using 'replication.type: SEGMENT'"); } @@ -1221,6 +1311,10 @@ public TimeValue getRemoteTranslogUploadBufferInterval() { return remoteTranslogUploadBufferInterval; } + public int getRemoteTranslogExtraKeep() { + return remoteTranslogKeepExtraGen; + } + /** * Returns true iff the remote translog buffer interval setting exists or in other words is explicitly set. */ @@ -1232,6 +1326,10 @@ public void setRemoteTranslogUploadBufferInterval(TimeValue remoteTranslogUpload this.remoteTranslogUploadBufferInterval = remoteTranslogUploadBufferInterval; } + public void setRemoteTranslogKeepExtraGen(int extraGen) { + this.remoteTranslogKeepExtraGen = extraGen; + } + /** * Returns this interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled. */ @@ -1439,9 +1537,43 @@ public long getGcDeletesInMillis() { /** * Returns the merge policy that should be used for this index. - */ - public MergePolicy getMergePolicy() { - return mergePolicyConfig.getMergePolicy(); + * @param isTimeSeriesIndex true if index contains @timestamp field + */ + public MergePolicy getMergePolicy(boolean isTimeSeriesIndex) { + String indexScopedPolicy = scopedSettings.get(INDEX_MERGE_POLICY); + MergePolicyProvider mergePolicyProvider = null; + IndexMergePolicy indexMergePolicy = IndexMergePolicy.fromString(indexScopedPolicy); + switch (indexMergePolicy) { + case TIERED: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + case DEFAULT_POLICY: + if (isTimeSeriesIndex) { + String nodeScopedTimeSeriesIndexPolicy = TIME_SERIES_INDEX_MERGE_POLICY.get(nodeSettings); + IndexMergePolicy nodeMergePolicy = IndexMergePolicy.fromString(nodeScopedTimeSeriesIndexPolicy); + switch (nodeMergePolicy) { + case TIERED: + case DEFAULT_POLICY: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + } + } else { + mergePolicyProvider = tieredMergePolicyProvider; + } + break; + } + assert mergePolicyProvider != null : "should not happen as validation for invalid merge policy values " + + "are part of setting definition"; + if (logger.isTraceEnabled()) { + logger.trace("Index: " + this.index.getName() + ", Merge policy used: " + mergePolicyProvider); + } + return mergePolicyProvider.getMergePolicy(); } public T getValue(Setting setting) { @@ -1632,7 +1764,7 @@ public boolean isMergeOnFlushEnabled() { } private void setMergeOnFlushPolicy(String policy) { - if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + if (Strings.isEmpty(policy) || DEFAULT_POLICY.equalsIgnoreCase(policy)) { mergeOnFlushPolicy = null; } else if 
(MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; @@ -1643,7 +1775,7 @@ private void setMergeOnFlushPolicy(String policy) { + " has unsupported policy specified: " + policy + ". Please use one of: " - + MERGE_ON_FLUSH_DEFAULT_POLICY + + DEFAULT_POLICY + ", " + MERGE_ON_FLUSH_MERGE_POLICY ); diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index 763ed34a1e6a6..9edb268a5126c 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -53,7 +53,7 @@ /** * Holds all the information that is used to build the sort order of an index. - * + *
                      * The index sort settings are final and can be defined only at index creation. * These settings are divided in four lists that are merged during the initialization of this class: *
                        diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java new file mode 100644 index 0000000000000..0b762d781957c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_NO_CFS_RATIO; + +/** + *
                        + * The LogByteSizeMergePolicy is an alternative merge policy primarily used here to optimize the merging of segments in scenarios + * with index with timestamps. + * While the TieredMergePolicy is the default choice, the LogByteSizeMergePolicy can be configured + * as the default merge policy for time-index data using the index.datastream_merge.policy setting. + * + *
<p>
                        + * Unlike the TieredMergePolicy, which prioritizes merging segments of equal sizes, the LogByteSizeMergePolicy + * specializes in merging adjacent segments efficiently. + * This characteristic makes it particularly well-suited for range queries on time-index data. + * Typically, adjacent segments in time-index data often contain documents with similar timestamps. + * When these segments are merged, the resulting segment covers a range of timestamps with reduced overlap compared + * to the adjacent segments. This reduced overlap remains even as segments grow older and larger, + * which can significantly benefit range queries on timestamps. + * + *
<p>
                        + * In contrast, the TieredMergePolicy does not honor this timestamp range optimization. It focuses on merging segments + * of equal sizes and does not consider adjacency. Consequently, as segments grow older and larger, + * the overlap of timestamp ranges among adjacent segments managed by TieredMergePolicy can increase. + * This can lead to inefficiencies in range queries on timestamps, as the number of segments to be scanned + * within a given timestamp range could become high. + * + * @opensearch.internal + */ +public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { + private final LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MERGE_FACTOR = 10; + + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + + public static final ByteSizeValue DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE = new ByteSizeValue(Long.MAX_VALUE); + + public static final Setting INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.merge_factor", + DEFAULT_MERGE_FACTOR, // keeping it same as default max merge at once for tiered merge policy + 2, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.min_merge", + DEFAULT_MIN_MERGE, // keeping it same as default floor segment for tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment", + DEFAULT_MAX_MERGED_SEGMENT, // keeping default same as tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment_forced_merge", + DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGED_DOCS_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.max_merged_docs", + DEFAULT_MAX_MERGE_DOCS, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_NO_CFS_RATIO_SETTING = new Setting<>( + "index.merge.log_byte_size_policy.no_cfs_ratio", + Double.toString(DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + LogByteSizeMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + + // Undocumented settings, works great with defaults + logByteSizeMergePolicy.setMergeFactor(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING)); + logByteSizeMergePolicy.setMinMergeMB(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMB(indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge( + 
indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING).getMbFrac() + ); + logByteSizeMergePolicy.setMaxMergeDocs(indexSettings.getValue(INDEX_LBS_MAX_MERGED_DOCS_SETTING)); + logByteSizeMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_LBS_NO_CFS_RATIO_SETTING)); + } + + @Override + public MergePolicy getMergePolicy() { + return mergesEnabled ? logByteSizeMergePolicy : NoMergePolicy.INSTANCE; + } + + void setLBSMergeFactor(int mergeFactor) { + logByteSizeMergePolicy.setMergeFactor(mergeFactor); + } + + void setLBSMaxMergeSegment(ByteSizeValue maxMergeSegment) { + logByteSizeMergePolicy.setMaxMergeMB(maxMergeSegment.getMbFrac()); + } + + void setLBSMinMergedMB(ByteSizeValue minMergedSize) { + logByteSizeMergePolicy.setMinMergeMB(minMergedSize.getMbFrac()); + } + + void setLBSMaxMergeMBForForcedMerge(ByteSizeValue maxMergeForcedMerge) { + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge(maxMergeForcedMerge.getMbFrac()); + } + + void setLBSMaxMergeDocs(int maxMergeDocs) { + logByteSizeMergePolicy.setMaxMergeDocs(maxMergeDocs); + } + + void setLBSNoCFSRatio(Double noCFSRatio) { + logByteSizeMergePolicy.setNoCFSRatio(noCFSRatio); + } + + @Override + public String toString() { + return "LogByteSizeMergePolicyProvider{" + + "mergeFactor=" + + logByteSizeMergePolicy.getMergeFactor() + + ", minMergeMB=" + + logByteSizeMergePolicy.getMinMergeMB() + + ", maxMergeMB=" + + logByteSizeMergePolicy.getMaxMergeMB() + + ", maxMergeMBForForcedMerge=" + + logByteSizeMergePolicy.getMaxMergeMBForForcedMerge() + + ", maxMergedDocs=" + + logByteSizeMergePolicy.getMaxMergeDocs() + + ", noCFSRatio=" + + logByteSizeMergePolicy.getNoCFSRatio() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyProvider.java b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java new file mode 100644 index 0000000000000..6f734314f758f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.lucene.index.MergePolicy; +import org.opensearch.common.annotation.InternalApi; + +/** + * A provider for obtaining merge policies used by OpenSearch indexes. + * + * @opensearch.internal + */ + +@InternalApi +public interface MergePolicyProvider { + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + String INDEX_MERGE_ENABLED = "index.merge.enabled"; + + /** + * Gets the merge policy to be used for index. + * + * @return The merge policy instance. + */ + MergePolicy getMergePolicy(); +} diff --git a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java index 9e170b448d641..b2b7781a20d26 100644 --- a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java +++ b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java @@ -48,14 +48,14 @@ * *
<ul>
                          *
* <li> index.merge.scheduler.max_thread_count: - * + * <p>
                          * The maximum number of threads that may be merging at once. Defaults to * Math.max(1, Math.min(4, {@link OpenSearchExecutors#allocatedProcessors(Settings)} / 2)) * which works well for a good solid-state-disk (SSD). If your index is on * spinning platter drives instead, decrease this to 1. * *
* <li> index.merge.scheduler.auto_throttle: - * + * <p>
                          * If this is true (the default), then the merge scheduler will rate-limit IO * (writes) for merges to an adaptive value depending on how many merges are * requested over time. An application with a low indexing rate that diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java index 9cc6685c75f80..0ae4526365bf1 100644 --- a/server/src/main/java/org/opensearch/index/ReplicationStats.java +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -8,11 +8,9 @@ package org.opensearch.index; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -76,9 +74,9 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.SEGMENT_REPLICATION); - builder.field(Fields.MAX_BYTES_BEHIND, new ByteSizeValue(maxBytesBehind).toString()); - builder.field(Fields.TOTAL_BYTES_BEHIND, new ByteSizeValue(totalBytesBehind).toString()); - builder.field(Fields.MAX_REPLICATION_LAG, new TimeValue(maxReplicationLag)); + builder.field(Fields.MAX_BYTES_BEHIND, maxBytesBehind); + builder.field(Fields.TOTAL_BYTES_BEHIND, totalBytesBehind); + builder.field(Fields.MAX_REPLICATION_LAG, maxReplicationLag); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index 4284daf9ffef4..d9d480e7b2b27 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -106,10 +106,11 @@ public SegmentReplicationPressureService( ClusterService clusterService, IndicesService indicesService, ShardStateAction shardStateAction, + SegmentReplicationStatsTracker tracker, ThreadPool threadPool ) { this.indicesService = indicesService; - this.tracker = new SegmentReplicationStatsTracker(this.indicesService); + this.tracker = tracker; this.shardStateAction = shardStateAction; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java new file mode 100644 index 0000000000000..492f253bbcb7c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.Version; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Segment replication rejection stats. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationRejectionStats implements Writeable, ToXContentFragment { + + /** + * Total rejections due to segment replication backpressure + */ + private long totalRejectionCount; + + public SegmentReplicationRejectionStats(final long totalRejectionCount) { + this.totalRejectionCount = totalRejectionCount; + } + + public SegmentReplicationRejectionStats(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.totalRejectionCount = in.readVLong(); + } + } + + public long getTotalRejectionCount() { + return totalRejectionCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("segment_replication_backpressure"); + builder.field("total_rejected_requests", totalRejectionCount); + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeVLong(totalRejectionCount); + } + } + + @Override + public String toString() { + return "SegmentReplicationRejectionStats{ totalRejectedRequestCount=" + totalRejectionCount + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java index 6d5c00c08caff..f5fc8aa1c1eea 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java @@ -33,6 +33,14 @@ public SegmentReplicationStatsTracker(IndicesService indicesService) { rejectionCount = ConcurrentCollections.newConcurrentMap(); } + public SegmentReplicationRejectionStats getTotalRejectionStats() { + return new SegmentReplicationRejectionStats(this.rejectionCount.values().stream().mapToInt(AtomicInteger::get).sum()); + } + + protected Map getRejectionCount() { + return rejectionCount; + } + public SegmentReplicationStats getStats() { Map stats = new HashMap<>(); for (IndexService indexService : indicesService) { diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java index 07768c6769b71..a6135186fb5ff 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java @@ -34,7 +34,7 @@ * Interfaces returns Releasable which when triggered will release the acquired accounting tokens values and also * perform necessary actions such as throughput evaluation once the request completes. * Consumers of these interfaces are expected to trigger close on releasable, reliably for consistency. - * + *
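The getTotalRejectionStats() method added to SegmentReplicationStatsTracker above simply folds the per-shard counters into one node-wide total; a JDK-only sketch of that aggregation (plain string keys stand in for the real ShardId keys, and the counts are made up):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RejectionCountSumSketch {
    public static void main(String[] args) {
        // Hypothetical per-shard rejection counters, keyed by a shard label instead of ShardId.
        ConcurrentMap<String, AtomicInteger> rejectionCount = new ConcurrentHashMap<>();
        rejectionCount.put("index-a[0]", new AtomicInteger(2));
        rejectionCount.put("index-a[1]", new AtomicInteger(5));

        // Same shape of aggregation as getTotalRejectionStats(): sum every per-shard counter
        // into one node-wide total.
        long totalRejections = rejectionCount.values().stream().mapToInt(AtomicInteger::get).sum();
        System.out.println("total_rejected_requests = " + totalRejections); // 7
    }
}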
<p>
                          * Overall ShardIndexingPressure provides: * 1. Memory Accounting at shard level. This can be enabled/disabled based on dynamic setting. * 2. Memory Accounting at Node level. Tracking is done using the IndexingPressure artefacts to support feature seamless toggling. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java index dea3cc8970cb5..e5c1af2e9c9f0 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java @@ -30,16 +30,16 @@ /** * The Shard Indexing Pressure Memory Manager is the construct responsible for increasing and decreasing the allocated shard limit * based on incoming requests. A shard limits defines the maximum memory that a shard can occupy in the heap for request objects. - * + *
<p>
                          * Based on the overall memory utilization on the node, and current traffic needs shard limits will be modified: - * + *
<p>
                          * 1. If the limits assigned to a shard is breached (Primary Parameter) while the node level overall occupancy across all shards * is not greater than primary_parameter.node.soft_limit, MemoryManager will increase the shard limits without any deeper evaluation. * 2. If the limits assigned to the shard is breached(Primary Parameter) and the node level overall occupancy across all shards * is greater than primary_parameter.node.soft_limit, then MemoryManager will evaluate deeper parameters for shards to identify any * issues, such as throughput degradation (Secondary Parameter - 1) and time since last request was successful (Secondary Parameter - 2). * This helps identify detect any duress state with the shard, requesting more memory. - * + *
<p>
                          * Secondary Parameters covered above: * 1. ThroughputDegradationLimitsBreached - When the moving window throughput average has increased by a factor compared to * the historical throughput average. If the factor by which it has increased is greater than the degradation limit threshold, this @@ -47,7 +47,7 @@ * 2. LastSuccessfulRequestDurationLimitsBreached - When the time since the last successful request completed is greater than the max * timeout threshold value, while there a number of outstanding requests greater than the max outstanding requests then this parameter * is considered to be breached. - * + *
<p>
                          * MemoryManager attempts to increase of decrease the shard limits in case the shard utilization goes below operating_factor.lower or * goes above operating_factor.upper of current shard limits. MemoryManager attempts to update the new shard limit such that the new value * remains withing the operating_factor.optimal range of current shard utilization. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java index b41dd1359394b..9b24d119f24fd 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java @@ -22,24 +22,24 @@ * Shard indexing pressure store acts as a central repository for all the shard-level tracker objects currently being * used at the Node level, for tracking indexing pressure requests. * Store manages the tracker lifecycle, from creation, access, until it is evicted to be collected. - * + *
<p>
                          * Trackers are maintained at two levels for access simplicity and better memory management: - * + *
<p>
                          * 1. shardIndexingPressureHotStore : As the name suggests, it is hot store for tracker objects which are currently live i.e. being used * to track an ongoing request. - * + *
<p>
                          * 2. shardIndexingPressureColdStore : This acts as the store for all the shard tracking objects which are currently being used * by the framework. In addition to hot trackers, the recently used trackers which are although not currently live, but again can be used * in near future, are also part of this store. To limit any memory implications, this store has an upper limit on the maximum number of * trackers its can hold at any given time, which is a configurable dynamic setting. - * + *
<p>
                          * Tracking objects when created are part of both the hot store as well as cold store. However, once the object * is no more live it is removed from the hot store. Objects in the cold store are evicted once the cold store * reaches its maximum limit. Think of it like a periodic purge when upper limit is hit. * During get if tracking object is not present in the hot store, a lookup is made into the cache store. If found, * object is brought into the hot store again, until it remains active. If not present in the either store, a fresh * object is instantiated and registered in both the stores for concurrent accesses. - * + *
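A JDK-only sketch of the hot/cold lookup just described; the eviction logic, size limits and the real ShardId keys of ShardIndexingPressureStore are deliberately left out, and the names here are illustrative.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class TwoLevelTrackerStoreSketch {
    // Stand-in for the per-shard tracker object; the real store keys on ShardId.
    static final class Tracker {}

    private final Map<String, Tracker> hotStore = new ConcurrentHashMap<>();
    private final Map<String, Tracker> coldStore = new ConcurrentHashMap<>();

    // Lookup order described above: hot store first, then cold store (re-promote on hit),
    // otherwise create a fresh tracker and register it in both stores.
    Tracker getTracker(String shardKey) {
        Tracker tracker = hotStore.get(shardKey);
        if (tracker == null) {
            tracker = coldStore.computeIfAbsent(shardKey, k -> new Tracker());
            hotStore.put(shardKey, tracker);
        }
        return tracker;
    }

    public static void main(String[] args) {
        TwoLevelTrackerStoreSketch store = new TwoLevelTrackerStoreSketch();
        System.out.println(store.getTracker("index-a[0]") == store.getTracker("index-a[0]")); // true
    }
}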
<p>
                          * Note: The implementation of shardIndexingPressureColdStore methods is such that get, * update and evict operations can be abstracted out to support any other strategy such as LRU, if * discovered a need later. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java index 7d67b47141ef5..e0edb8260fd0f 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java @@ -14,19 +14,19 @@ * This class is responsible for all the tracking that needs to be performed at every Shard Level for Indexing Operations on the node. * Info is maintained at the granularity of three kinds of write operation (tasks) on the node i.e. coordinating, primary and replica. * This is useful in evaluating the shard indexing back-pressure on the node, to throttle requests and also to publish runtime stats. - * + *
<p>
                          * There can be four kinds of operation tracking on a node which needs to performed for a shard: * 1. Coordinating Operation : To track all the individual shard bulk request on the coordinator node. * 2. Primary Operation : To track all the individual shard bulk request on the primary node. * 3. Replica Operation : To track all the individual shard bulk request on the replica node. * 4. Common Operation : To track values applicable across the specific shard role. - * + *
<p>
                          * ShardIndexingPressureTracker therefore provides the construct to track all the write requests targeted for a ShardId on the node, * across all possible transport-write-actions i.e. Coordinator, Primary and Replica. * Tracker is uniquely identified against a Shard-Id on the node. Currently the knowledge of shard roles (such as primary vs replica) * is not explicit to the tracker, and it is able to track different values simultaneously based on the interaction hooks of the * operation type i.e. write-action layers. - * + *
<p>
                          * There is room for introducing more unique identity to the trackers based on Shard-Role or Shard-Allocation-Id, but that will also * increase the complexity of handling shard-lister events and handling other race scenarios such as request-draining etc. * To prefer simplicity we have modelled by keeping explicit fields for different operation tracking, while tracker by itself is diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java similarity index 82% rename from server/src/main/java/org/opensearch/index/MergePolicyConfig.java rename to server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java index fe2af21dfe039..d5d354c6c960a 100644 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.TieredMergePolicy; @@ -47,9 +48,12 @@ * where the index data is stored, and are immutable up to delete markers. * Segments are, periodically, merged into larger segments to keep the * index size at bay and expunge deletes. + * This class customizes and exposes 2 merge policies from lucene - + * {@link LogByteSizeMergePolicy} and {@link TieredMergePolicy}. + * * *
                          - * Merges select segments of approximately equal size, subject to an allowed + * Tiered merge policy select segments of approximately equal size, subject to an allowed * number of segments per tier. The merge policy is able to merge * non-adjacent segments, and separates how many segments are merged at once from how many * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). @@ -125,8 +129,9 @@ * @opensearch.internal */ -public final class MergePolicyConfig { - private final OpenSearchTieredMergePolicy mergePolicy = new OpenSearchTieredMergePolicy(); +public final class TieredMergePolicyProvider implements MergePolicyProvider { + private final OpenSearchTieredMergePolicy tieredMergePolicy = new OpenSearchTieredMergePolicy(); + private final Logger logger; private final boolean mergesEnabled; @@ -137,10 +142,11 @@ public final class MergePolicyConfig { public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( "index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, + TieredMergePolicyProvider::parseNoCFSRatio, Property.Dynamic, Property.IndexScope ); @@ -194,10 +200,8 @@ public final class MergePolicyConfig { Property.Dynamic, Property.IndexScope ); - // don't convert to Setting<> and register... we only set this in tests and register via a plugin - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; - MergePolicyConfig(Logger logger, IndexSettings indexSettings) { + TieredMergePolicyProvider(Logger logger, IndexSettings indexSettings) { this.logger = logger; double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); @@ -216,54 +220,41 @@ public final class MergePolicyConfig { ); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); - mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); - mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - mergePolicy.setSegmentsPerTier(segmentsPerTier); - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - if (logger.isTraceEnabled()) { - logger.trace( - "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," - + " deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, - floorSegment, - maxMergeAtOnce, - maxMergedSegment, - segmentsPerTier, - deletesPctAllowed - ); - } + tieredMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + tieredMergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); + tieredMergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } void setSegmentsPerTier(Double segmentsPerTier) 
{ - mergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); } void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); } void setMaxMergesAtOnce(Integer maxMergeAtOnce) { - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); + tieredMergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); } void setExpungeDeletesAllowed(Double value) { - mergePolicy.setForceMergeDeletesPctAllowed(value); + tieredMergePolicy.setForceMergeDeletesPctAllowed(value); } void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); + tieredMergePolicy.setNoCFSRatio(noCFSRatio); } void setDeletesPctAllowed(Double deletesPctAllowed) { - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { @@ -285,11 +276,11 @@ private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerT return maxMergeAtOnce; } - MergePolicy getMergePolicy() { - return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE; + public MergePolicy getMergePolicy() { + return mergesEnabled ? tieredMergePolicy : NoMergePolicy.INSTANCE; } - private static double parseNoCFSRatio(String noCFSRatio) { + public static double parseNoCFSRatio(String noCFSRatio) { noCFSRatio = noCFSRatio.trim(); if (noCFSRatio.equalsIgnoreCase("true")) { return 1.0d; @@ -310,4 +301,23 @@ private static double parseNoCFSRatio(String noCFSRatio) { } } } + + @Override + public String toString() { + return "TieredMergePolicyProvider{" + + "expungeDeletesAllowed=" + + tieredMergePolicy.getForceMergeDeletesPctAllowed() + + ", floorSegment=" + + tieredMergePolicy.getFloorSegmentMB() + + ", maxMergeAtOnce=" + + tieredMergePolicy.getMaxMergeAtOnce() + + ", maxMergedSegment=" + + tieredMergePolicy.getMaxMergedSegmentMB() + + ", segmentsPerTier=" + + tieredMergePolicy.getSegmentsPerTier() + + ", deletesPctAllowed=" + + tieredMergePolicy.getDeletesPctAllowed() + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/index/VersionType.java b/server/src/main/java/org/opensearch/index/VersionType.java index 3ce7f600a6a5b..8aa1fbd1b81ac 100644 --- a/server/src/main/java/org/opensearch/index/VersionType.java +++ b/server/src/main/java/org/opensearch/index/VersionType.java @@ -244,7 +244,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on write. - * + *
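The parseNoCFSRatio helper made public on TieredMergePolicyProvider above backs index.compound_format and the new log_byte_size no_cfs_ratio setting; a small sketch of calling it, assuming the OpenSearch server jar is on the classpath (only the "true" branch is visible in this hunk, so the ratio case is an assumption):

import org.opensearch.index.TieredMergePolicyProvider;

public class NoCfsRatioSketch {
    public static void main(String[] args) {
        // "true" maps to 1.0d, as shown in the hunk above.
        System.out.println(TieredMergePolicyProvider.parseNoCFSRatio("true"));
        // A plain ratio between 0 and 1 is expected to be parsed as-is (assumption, branch not shown).
        System.out.println(TieredMergePolicyProvider.parseNoCFSRatio("0.1"));
    }
}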
<p>
                          * Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true; * * @param currentVersion the current version for the document @@ -265,7 +265,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on read. - * + *
<p>
                          * Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true; * * @param currentVersion the current version for the document diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index cfdf416b2d533..3b1f6af8a0030 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -244,7 +244,7 @@ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException { /** * Creates a custom analyzer from a collection of {@link NameOrDefinition} specifications for each component - * + *
<p>
                          * Callers are responsible for closing the returned Analyzer */ public NamedAnalyzer buildCustomAnalyzer( diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java index ab8d23339029c..30fe31105e1d9 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java @@ -36,7 +36,7 @@ /** * A CharFilterFactory that also supports normalization - * + *
<p>
                          * The default implementation of {@link #normalize(Reader)} delegates to * {@link #create(Reader)} * diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java index be761aee0d36c..2ed621cdd22b1 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java @@ -36,7 +36,7 @@ /** * A TokenFilterFactory that may be used for normalization - * + *
<p>
                          * The default implementation delegates {@link #normalize(TokenStream)} to * {@link #create(TokenStream)}}. * diff --git a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index a65e1898bea0d..8719d127781e0 100644 --- a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -102,7 +102,7 @@ public void close() throws IOException { /** * A special cache that closes the gap between PreBuiltAnalyzers and PreBuiltAnalyzerProviderFactory. - * + *
<p>
                          * This can be removed when all analyzers have been moved away from PreBuiltAnalyzers to * PreBuiltAnalyzerProviderFactory either in server or analysis-common. * diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index e66ae20508dfe..16a1f9a067998 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -155,11 +155,11 @@ public TokenStream create(TokenStream tokenStream) { filter.setTokenSeparator(tokenSeparator); filter.setFillerToken(fillerToken); if (outputUnigrams || (minShingleSize != maxShingleSize)) { - /** - * We disable the graph analysis on this token stream - * because it produces shingles of different size. - * Graph analysis on such token stream is useless and dangerous as it may create too many paths - * since shingles of different size are not aligned in terms of positions. + /* + We disable the graph analysis on this token stream + because it produces shingles of different size. + Graph analysis on such token stream is useless and dangerous as it may create too many paths + since shingles of different size are not aligned in terms of positions. */ filter.addAttribute(DisableGraphAttribute.class); } diff --git a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java index 1b9d781b177ce..6708db8571b2a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java @@ -51,7 +51,7 @@ public interface TokenFilterFactory { /** * Normalize a tokenStream for use in multi-term queries - * + *
<p>
                          * The default implementation is a no-op */ default TokenStream normalize(TokenStream tokenStream) { @@ -86,7 +86,7 @@ default TokenFilterFactory getChainAwareTokenFilterFactory( /** * Return a version of this TokenFilterFactory appropriate for synonym parsing - * + *
<p>
                          * Filters that should not be applied to synonyms (for example, those that produce * multiple tokens) should throw an exception * diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index a8f2f60f8cf12..9b0e1b70a8e05 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -145,6 +145,7 @@ public abstract class Engine implements LifecycleAware, Closeable { protected final EngineConfig engineConfig; protected final Store store; protected final AtomicBoolean isClosed = new AtomicBoolean(false); + private final CounterMetric totalUnreferencedFileCleanUpsPerformed = new CounterMetric(); private final CountDownLatch closedLatch = new CountDownLatch(1); protected final EventListener eventListener; protected final ReentrantLock failEngineLock = new ReentrantLock(); @@ -267,6 +268,13 @@ protected final DocsStats docsStats(IndexReader indexReader) { return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); } + /** + * Returns the unreferenced file cleanup count for this engine. + */ + public long unreferencedFileCleanUpsPerformed() { + return totalUnreferencedFileCleanUpsPerformed.count(); + } + /** * Performs the pre-closing checks on the {@link Engine}. * @@ -1340,7 +1348,9 @@ private void cleanUpUnreferencedFiles() { .setOpenMode(IndexWriterConfig.OpenMode.APPEND) ) ) { - // do nothing and close this will kick off IndexFileDeleter which will remove all unreferenced files. + // do nothing except increasing metric count and close this will kick off IndexFileDeleter which will + // remove all unreferenced files + totalUnreferencedFileCleanUpsPerformed.inc(); } catch (Exception ex) { logger.error("Error while deleting unreferenced file ", ex); } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 3eeceff2253c1..8e1627af274c5 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -402,7 +402,7 @@ public CompletionStats completionStats(String... fieldNamePatterns) { * The main purpose for this is that if we have external refreshes happening we don't issue extra * refreshes to clear version map memory etc. this can cause excessive segment creation if heavy indexing * is happening and the refresh interval is low (ie. 1 sec) - * + *
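A standalone Lucene sketch of the trick Engine#cleanUpUnreferencedFiles above relies on (and that the new totalUnreferencedFileCleanUpsPerformed counter now records): opening an IndexWriter in APPEND mode and closing it lets IndexFileDeleter drop files no commit references. An in-memory directory stands in for the shard store here; this is an illustration, not the engine code.

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class UnreferencedFileCleanupSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        // An index must exist before a writer can be opened in APPEND mode.
        new IndexWriter(dir, new IndexWriterConfig()).close();

        // Open and immediately close an APPEND-mode writer: closing triggers Lucene's
        // IndexFileDeleter, which removes files not referenced by any commit point.
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.APPEND))) {
            // intentionally empty: the close at the end of this block does the cleanup
        }
        dir.close();
    }
}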
<p>
                          * This also prevents segment starvation where an internal reader holds on to old segments literally forever * since no indexing is happening and refreshes are only happening to the external reader manager, while with * this specialized implementation an external refresh will immediately be reflected on the internal reader diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 570a2b186841a..f9f0458f45ee8 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -180,7 +180,7 @@ public synchronized void updateSegments(final SegmentInfos infos) throws IOExcep /** * Persist the latest live SegmentInfos. - * + *
<p>
                          * This method creates a commit point from the latest SegmentInfos. * * @throws IOException - When there is an IO error committing the SegmentInfos. @@ -379,6 +379,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { try { commitSegmentInfos(); } catch (IOException e) { + maybeFailEngine("flush", e); throw new FlushFailedEngineException(shardId, e); } finally { flushLock.unlock(); @@ -437,13 +438,29 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; latestSegmentInfos.changed(); } - commitSegmentInfos(latestSegmentInfos); - IOUtils.close(readerManager, translogManager, store::decRef); + try { + commitSegmentInfos(latestSegmentInfos); + } catch (IOException e) { + // mark the store corrupted unless we are closing as result of engine failure. + // in this case Engine#failShard will handle store corruption. + if (failEngineLock.isHeldByCurrentThread() == false && store.isMarkedCorrupted() == false) { + try { + store.markStoreCorrupted(e); + } catch (IOException ex) { + logger.warn("Unable to mark store corrupted", ex); + } + } + } + IOUtils.close(readerManager, translogManager); } catch (Exception e) { - logger.warn("failed to close engine", e); + logger.error("failed to close engine", e); } finally { - logger.debug("engine closed [{}]", reason); - closedLatch.countDown(); + try { + store.decRef(); + logger.debug("engine closed [{}]", reason); + } finally { + closedLatch.countDown(); + } } } } diff --git a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java index 2e8bd6409c2f6..19454967f9ee3 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java +++ b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java @@ -22,7 +22,7 @@ /** * This class is heavily influenced by Lucene's ReplicaFileDeleter class used to keep track of * segment files that should be preserved on replicas between replication events. - * + *
<p>
                          * https://github.com/apache/lucene/blob/main/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java index 2df4baeb8631b..3090b8e7f5b15 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java @@ -73,7 +73,7 @@ protected MultiGeoPointValues() {} /** * Return the next value associated with the current document. This must not be * called more than {@link #docValueCount()} times. - * + *
<p>
                          * Note: the returned {@link GeoPoint} might be shared across invocations. * * @return the next value for the current docID set to {@link #advanceExact(int)}. diff --git a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java index a3c3774b250dc..5860cfe8a47c3 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java @@ -55,7 +55,7 @@ /** * Script level doc values, the assumption is that any implementation will * implement a {@link Longs#getValue getValue} method. - * + *
<p>
                          * Implementations should not internally re-use objects for the values that they * return as a single {@link ScriptDocValues} instance can be reused to return * values form multiple documents. @@ -589,11 +589,11 @@ public BytesRef get(int index) { + "Use doc[].size()==0 to check if a document is missing a field!" ); } - /** - * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the - * returned value and the same instance might be used to - * return values from multiple documents. - **/ + /* + We need to make a copy here because {@link BinaryScriptDocValues} might reuse the + returned value and the same instance might be used to + return values from multiple documents. + */ return values[index].toBytesRef(); } diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java index 4f27c9b10f0ee..fe033fa7a3f70 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java @@ -101,9 +101,9 @@ public SortedSetOrdinalsIndexFieldData( @Override public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); - /** - * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and - * returns a custom sort field otherwise. + /* + Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and + returns a custom sort field otherwise. */ if (nested != null || (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java index a038cf178bb03..3b6782b34feea 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java @@ -121,7 +121,7 @@ public abstract static class Parser { /** * Given a parsed value and a format string, formats the value into a plain Java object. - * + *
<p>
                          * Supported formats include 'geojson' and 'wkt'. The different formats are defined * as subclasses of {@link org.opensearch.common.geo.GeometryFormat}. */ @@ -129,7 +129,7 @@ public abstract static class Parser { /** * Parses the given value, then formats it according to the 'format' string. - * + *
<p>
                          * By default, this method simply parses the value using {@link Parser#parse}, then formats * it with {@link Parser#format}. However some {@link Parser} implementations override this * as they can avoid parsing the value if it is already in the right format. diff --git a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java index f5dc34ab8ac5d..b3112df86bab6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java @@ -43,7 +43,7 @@ /** * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. - * + *
<p>
                          * This class differs from {@link SourceValueFetcher} in that it directly handles * array values in parsing. Field types should use this class if their corresponding * mapper returns true for {@link FieldMapper#parsesArrayValue()}. diff --git a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java index 90ef3cc1689a4..e1413fd9b4bbe 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java @@ -447,13 +447,13 @@ int getMaxInputLength() { /** * Parses and indexes inputs - * + *
<p>
                          * Parsing: * Acceptable format: * "STRING" - interpreted as field value (input) * "ARRAY" - each element can be one of "OBJECT" (see below) * "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT } - * + *
<p>
                          * Indexing: * if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)} * else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField} diff --git a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java index 2776e7515bbf6..fbb67731f581b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java +++ b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java @@ -42,8 +42,7 @@ import java.io.Reader; /** - * Base class for constructing a custom docvalues type - * + * Base class for constructing a custom docvalues type. * used for binary, geo, and range fields * * @opensearch.api diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 393ddf2dd1de0..3b832628695fe 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -52,6 +52,7 @@ import org.opensearch.common.time.DateMathParser; import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.LocaleUtils; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; @@ -91,7 +92,21 @@ public final class DateFieldMapper extends ParametrizedFieldMapper { public static final String CONTENT_TYPE = "date"; public static final String DATE_NANOS_CONTENT_TYPE = "date_nanos"; - public static final DateFormatter DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); + @Deprecated + public static final DateFormatter LEGACY_DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern( + // TODO remove in 3.0 after backporting + "strict_date_optional_time||epoch_millis" + ); + public static final DateFormatter DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern( + "strict_date_time_no_millis||strict_date_optional_time||epoch_millis", + "strict_date_optional_time" + ); + + public static DateFormatter getDefaultDateTimeFormatter() { + return FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING) + ? 
DEFAULT_DATE_TIME_FORMATTER + : LEGACY_DEFAULT_DATE_TIME_FORMATTER; + } /** * Resolution of the date time @@ -223,8 +238,14 @@ public static class Builder extends ParametrizedFieldMapper.Builder { "format", false, m -> toType(m).format, - DEFAULT_DATE_TIME_FORMATTER.pattern() + getDefaultDateTimeFormatter().pattern() ); + private final Parameter printFormat = Parameter.stringParam( + "print_format", + false, + m -> toType(m).printFormat, + getDefaultDateTimeFormatter().printPattern() + ).acceptsNull(); private final Parameter locale = new Parameter<>( "locale", false, @@ -253,13 +274,18 @@ public Builder( this.ignoreMalformed = Parameter.boolParam("ignore_malformed", true, m -> toType(m).ignoreMalformed, ignoreMalformedByDefault); if (dateFormatter != null) { this.format.setValue(dateFormatter.pattern()); + this.printFormat.setValue(dateFormatter.printPattern()); this.locale.setValue(dateFormatter.locale()); } } private DateFormatter buildFormatter() { try { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); + if (format.isConfigured() && !printFormat.isConfigured()) { + return DateFormatter.forPattern(format.getValue(), null, !format.isConfigured()).withLocale(locale.getValue()); + } + return DateFormatter.forPattern(format.getValue(), printFormat.getValue(), !format.isConfigured()) + .withLocale(locale.getValue()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); } @@ -267,7 +293,7 @@ private DateFormatter buildFormatter() { @Override protected List> getParameters() { - return Arrays.asList(index, docValues, store, format, locale, nullValue, ignoreMalformed, boost, meta); + return Arrays.asList(index, docValues, store, format, printFormat, locale, nullValue, ignoreMalformed, boost, meta); } private Long parseNullValue(DateFieldType fieldType) { @@ -346,7 +372,7 @@ public DateFieldType( } public DateFieldType(String name) { - this(name, true, false, true, DEFAULT_DATE_TIME_FORMATTER, Resolution.MILLISECONDS, null, Collections.emptyMap()); + this(name, true, false, true, getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap()); } public DateFieldType(String name, DateFormatter dateFormatter) { @@ -354,7 +380,7 @@ public DateFieldType(String name, DateFormatter dateFormatter) { } public DateFieldType(String name, Resolution resolution) { - this(name, true, false, true, DEFAULT_DATE_TIME_FORMATTER, resolution, null, Collections.emptyMap()); + this(name, true, false, true, getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap()); } public DateFieldType(String name, Resolution resolution, DateFormatter dateFormatter) { @@ -610,6 +636,7 @@ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { private final boolean hasDocValues; private final Locale locale; private final String format; + private final String printFormat; private final boolean ignoreMalformed; private final Long nullValue; private final String nullValueAsString; @@ -633,6 +660,7 @@ private DateFieldMapper( this.hasDocValues = builder.docValues.getValue(); this.locale = builder.locale.getValue(); this.format = builder.format.getValue(); + this.printFormat = builder.printFormat.getValue(); this.ignoreMalformed = builder.ignoreMalformed.getValue(); this.nullValueAsString = builder.nullValue.getValue(); this.nullValue = nullValue; diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java 
b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index dbafc2a923cf2..db58c5e02e176 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -197,7 +197,7 @@ private static String getRemainingFields(Map map) { /** * Given an optional type name and mapping definition, returns the type and a normalized form of the mappings. - * + *
<p>
                          * The provided mapping definition may or may not contain the type name as the root key in the map. This method * attempts to unwrap the mappings, so that they no longer contain a type name at the root. If no type name can * be found, through either the 'type' parameter or by examining the provided mappings, then an exception will be @@ -205,7 +205,6 @@ private static String getRemainingFields(Map map) { * * @param type An optional type name. * @param root The mapping definition. - * * @return A tuple of the form (type, normalized mappings). */ @SuppressWarnings({ "unchecked" }) diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java index 94bc4806ba0e0..2e59d86f9119c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java @@ -43,7 +43,7 @@ * to {@link DynamicKeyFieldMapper#keyedFieldType(String)}, with 'some_key' passed as the * argument. The field mapper is allowed to create a new field type dynamically in order * to handle the search. - * + *
<p>
                          * To prevent conflicts between these dynamic sub-keys and multi-fields, any field mappers * implementing this interface should explicitly disallow multi-fields. The constructor makes * sure to passes an empty multi-fields list to help prevent conflicting sub-keys from being diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java index 13150ddc50a51..a415078108eb6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java @@ -37,11 +37,11 @@ /** * A container that supports looking up field types for 'dynamic key' fields ({@link DynamicKeyFieldMapper}). - * + *
<p>
                          * Compared to standard fields, 'dynamic key' fields require special handling. Given a field name of the form * 'path_to_field.path_to_key', the container will dynamically return a new {@link MappedFieldType} that is * suitable for performing searches on the sub-key. - * + *
<p>
                          * Note: we anticipate that 'flattened' fields will be the only implementation {@link DynamicKeyFieldMapper}. * Flattened object fields live in the 'mapper-flattened' module. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java index 86c92ec19a2f7..ff9cb61b85571 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java @@ -43,7 +43,7 @@ /** * A mapper for field aliases. - * + *
<p>
                          * A field alias has no concrete field mappings of its own, but instead points to another field by * its path. Once defined, an alias can be used in place of the concrete field name in search requests. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java index f8514c86fa418..f6178a8284945 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java @@ -305,7 +305,7 @@ public void parse(ParseContext context) throws IOException { /** * Parse the field value and populate the fields on {@link ParseContext#doc()}. - * + *
<p>
                          * Implementations of this method should ensure that on failing to parse parser.currentToken() must be the * current failing token */ diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java index 8e1b6f2a3c08b..549ab0403e5dc 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java @@ -57,7 +57,7 @@ class FieldTypeLookup implements Iterable { * A map from field name to all fields whose content has been copied into it * through copy_to. A field only be present in the map if some other field * has listed it as a target of copy_to. - * + *
<p>
                          * For convenience, the set of copied fields includes the field itself. */ private final Map> fieldToCopiedFields = new HashMap<>(); @@ -133,7 +133,7 @@ public Set simpleMatchToFullName(String pattern) { /** * Given a concrete field name, return its paths in the _source. - * + *
<p>
                          * For most fields, the source path is the same as the field itself. However * there are cases where a field's values are found elsewhere in the _source: * - For a multi-field, the source path is the parent field. diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index db35c3edcc4a8..00b623dddac23 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -575,9 +575,9 @@ protected void parseCreateField(ParseContext context) throws IOException { context, fieldType().name() ); - /** - * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser - * It reads the JSON object and parsed to a list of string + /* + JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser + It reads the JSON object and parsed to a list of string */ XContentParser parser = JsonToStringParser.parseObject(); diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java index e3dab3f892949..fcca7e9804bf3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java @@ -63,7 +63,7 @@ /** * Field Mapper for geo_point types. - * + *
<p>
                          * Uses lucene 6 LatLonPoint encoding * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 58aa0bb2576e2..997835f712038 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -125,7 +125,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S /** * Create a helper class to fetch field values during the {@link FetchFieldsPhase}. - * + *
<p>
                          * New field types must implement this method in order to support the search 'fields' option. Except * for metadata fields, field types should not throw {@link UnsupportedOperationException} since this * could cause a search retrieving multiple fields (like "fields": ["*"]) to fail. @@ -487,7 +487,7 @@ public Map meta() { /** * Returns information on how any text in this field is indexed - * + *
<p>
                          * Fields that do not support any text-based queries should return * {@link TextSearchInfo#NONE}. Some fields (eg numeric) may support * only simple match queries, and can return diff --git a/server/src/main/java/org/opensearch/index/mapper/Mapper.java b/server/src/main/java/org/opensearch/index/mapper/Mapper.java index 59c647d38f0de..9c0e3ef72549a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/Mapper.java @@ -190,7 +190,7 @@ public Supplier queryShardContextSupplier() { /** * Gets an optional default date format for date fields that do not have an explicit format set - * + *
<p>
                          * If {@code null}, then date fields will default to {@link DateFieldMapper#DEFAULT_DATE_TIME_FORMATTER}. */ public DateFormatter getDateFormatter() { diff --git a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java index 024f4b71584bf..1c608fc52c1f5 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java @@ -157,7 +157,7 @@ public MappingLookup( /** * Returns the leaf mapper associated with this field name. Note that the returned mapper * could be either a concrete {@link FieldMapper}, or a {@link FieldAliasMapper}. - * + *
<p>
                          * To access a field's type information, {@link MapperService#fieldType} should be used instead. */ public Mapper getMapper(String field) { diff --git a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java index 4f5aefdfeed55..5a45ab72994ee 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java @@ -70,7 +70,7 @@ public interface TypeParser extends Mapper.TypeParser { /** * Declares an updateable boolean parameter for a metadata field - * + *
<p>
                          * We need to distinguish between explicit configuration and default value for metadata * fields, because mapping updates will carry over the previous metadata values if a * metadata field is not explicitly declared in the update. A standard boolean diff --git a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java index fd57975831e88..93b929a82f095 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java @@ -63,11 +63,11 @@ /** * Defines how a particular field should be indexed and searched - * + *
<p>
                          * Configuration {@link Parameter}s for the mapper are defined on a {@link Builder} subclass, * and returned by its {@link Builder#getParameters()} method. Merging, serialization * and parsing of the mapper are all mediated through this set of parameters. - * + *
<p>
                          * Subclasses should implement a {@link Builder} that is returned from the * {@link #getMergeBuilder()} method, initialised with the existing builder. * @@ -86,7 +86,7 @@ protected ParametrizedFieldMapper(String simpleName, MappedFieldType mappedField /** * Returns a {@link Builder} to be used for merging and serialization - * + *
<p>
                          * Implement as follows: * {@code return new MyBuilder(simpleName()).init(this); } */ @@ -256,7 +256,7 @@ public Parameter acceptsNull() { /** * Adds a deprecated parameter name. - * + *
<p>
                          * If this parameter name is encountered during parsing, a deprecation warning will * be emitted. The parameter will be serialized with its main name. */ diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 10f179c964591..05ca7dee0fe4b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -95,7 +95,7 @@ public class RangeFieldMapper extends ParametrizedFieldMapper { */ public static class Defaults { public static final Explicit COERCE = new Explicit<>(true, false); - public static final DateFormatter DATE_FORMATTER = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; + public static final DateFormatter DATE_FORMATTER = DateFieldMapper.getDefaultDateTimeFormatter(); } // this is private since it has a different default diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeType.java b/server/src/main/java/org/opensearch/index/mapper/RangeType.java index c8cd317779c7c..7e29fd417845b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeType.java @@ -313,7 +313,7 @@ public Query rangeQuery( ) { ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; - DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; + DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.getDefaultDateTimeFormatter().toDateMathParser() : parser; boolean roundUp = includeLower == false; // using "gt" should round lower bound up Long low = lowerTerm == null ? minValue() diff --git a/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java index ce3d6eef19af2..9d1ccc9e795ba 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java @@ -72,7 +72,7 @@ public class RootObjectMapper extends ObjectMapper { */ public static class Defaults { public static final DateFormatter[] DYNAMIC_DATE_TIME_FORMATTERS = new DateFormatter[] { - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis") }; public static final boolean DATE_DETECTION = true; public static final boolean NUMERIC_DETECTION = false; diff --git a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java index 16f76f087e403..038066773a360 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java @@ -55,11 +55,11 @@ /** * Mapper for the {@code _seq_no} field. - * + *
<p>
                          * We expect to use the seq# for sorting, during collision checking and for * doing range searches. Therefore the {@code _seq_no} field is stored both * as a numeric doc value and as numeric indexed field. - * + *
<p>
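As a small illustration of the range searches mentioned above for the {@code _seq_no} field, a hedged sketch using the standard query builders (the bounds are arbitrary):

    // Fetch documents whose sequence number falls in a window, e.g. for collision or audit checks.
    RangeQueryBuilder query = QueryBuilders.rangeQuery("_seq_no").gte(10).lte(20);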
                          * This mapper also manages the primary term field, which has no OpenSearch named * equivalent. The primary term is only used during collision after receiving * identical seq# values for two document copies. The primary term is stored as diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java index 69f53ba126790..a32d1c9f489ca 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java @@ -46,7 +46,7 @@ * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. Most standard field mappers will use this class * to implement value fetching. - * + *
<p>
                          * Field types that handle arrays directly should instead use {@link ArraySourceValueFetcher}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java index d1cea3fe7f1b0..96237b16ea5a4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java @@ -48,11 +48,11 @@ public interface ValueFetcher { /** * Given access to a document's _source, return this field's values. - * + *
<p>
                          * In addition to pulling out the values, they will be parsed into a standard form. * For example numeric field mappers make sure to parse the source value into a number * of the right type. - * + *
<p>
                          * Note that for array values, the order in which values are returned is undefined and * should not be relied on. * diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java index 37fdca8871b18..a284cec247ff1 100644 --- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.merge; +import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -65,9 +66,9 @@ public class MergeStats implements Writeable, ToXContentFragment { private long totalBytesPerSecAutoThrottle; - public MergeStats() { + private long unreferencedFileCleanUpsPerformed; - } + public MergeStats() {} public MergeStats(StreamInput in) throws IOException { total = in.readVLong(); @@ -81,6 +82,9 @@ public MergeStats(StreamInput in) throws IOException { totalStoppedTimeInMillis = in.readVLong(); totalThrottledTimeInMillis = in.readVLong(); totalBytesPerSecAutoThrottle = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + unreferencedFileCleanUpsPerformed = in.readOptionalVLong(); + } } public void add( @@ -133,6 +137,7 @@ public void addTotals(MergeStats mergeStats) { this.totalSizeInBytes += mergeStats.totalSizeInBytes; this.totalStoppedTimeInMillis += mergeStats.totalStoppedTimeInMillis; this.totalThrottledTimeInMillis += mergeStats.totalThrottledTimeInMillis; + addUnreferencedFileCleanUpStats(mergeStats.unreferencedFileCleanUpsPerformed); if (this.totalBytesPerSecAutoThrottle == Long.MAX_VALUE || mergeStats.totalBytesPerSecAutoThrottle == Long.MAX_VALUE) { this.totalBytesPerSecAutoThrottle = Long.MAX_VALUE; } else { @@ -140,6 +145,14 @@ public void addTotals(MergeStats mergeStats) { } } + public void addUnreferencedFileCleanUpStats(long unreferencedFileCleanUpsPerformed) { + this.unreferencedFileCleanUpsPerformed += unreferencedFileCleanUpsPerformed; + } + + public long getUnreferencedFileCleanUpsPerformed() { + return this.unreferencedFileCleanUpsPerformed; + } + /** * The total number of merges executed. 
*/ @@ -240,6 +253,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString()); } builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, totalBytesPerSecAutoThrottle); + builder.field(Fields.UNREFERENCED_FILE_CLEANUPS_PERFORMED, unreferencedFileCleanUpsPerformed); builder.endObject(); return builder; } @@ -267,6 +281,7 @@ static final class Fields { static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES = "total_auto_throttle_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC = "total_auto_throttle"; + static final String UNREFERENCED_FILE_CLEANUPS_PERFORMED = "unreferenced_file_cleanups_performed"; } @Override @@ -282,5 +297,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalStoppedTimeInMillis); out.writeVLong(totalThrottledTimeInMillis); out.writeVLong(totalBytesPerSecAutoThrottle); + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + out.writeOptionalVLong(unreferencedFileCleanUpsPerformed); + } } } diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index 6b8dd08ed0d91..66c6ee115c3f0 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -298,7 +298,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I /** * For internal usage only! - * + *
<p>
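Stepping back to the MergeStats change above: the new unreferenced-file clean-up counter is only serialized on 2.11+ streams. A minimal round-trip sketch, assuming BytesStreamOutput and the usual stream-version plumbing (checked IOExceptions omitted):

    MergeStats stats = new MergeStats();
    stats.addUnreferencedFileCleanUpStats(3);

    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_2_11_0);        // on a 2.11+ stream the counter is written
    stats.writeTo(out);

    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_2_11_0);
    MergeStats copy = new MergeStats(in);
    // copy.getUnreferencedFileCleanUpsPerformed() == 3; on a pre-2.11 stream the field is simply skipped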
                          * Extracts the inner hits from the query tree. * While it extracts inner hits, child inner hits are inlined into the inner hit builder they belong to. */ diff --git a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java index a4b75beab26ea..1fade8601e2a6 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -60,7 +60,7 @@ /** * Creates a Lucene query that will filter for all documents that lie within the specified * bounding box. - * + *
<p>
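A hedged usage sketch for the bounding-box query described above (field name and coordinates are invented):

    // Matches documents whose geo_point in "pin.location" lies inside the box;
    // setCorners takes top-left latitude/longitude followed by bottom-right latitude/longitude.
    GeoBoundingBoxQueryBuilder query = QueryBuilders.geoBoundingBoxQuery("pin.location")
        .setCorners(40.73, -74.1, 40.01, -71.12);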
                          * This query can only operate on fields of type geo_point that have latitude and longitude * enabled. * diff --git a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java index e029884f32531..33b896a1d5163 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java @@ -55,7 +55,7 @@ /** * Derived {@link AbstractGeometryQueryBuilder} that builds a lat, lon GeoShape Query. It * can be applied to any {@link MappedFieldType} that implements {@link GeoShapeQueryable}. - * + *
<p>
                          * GeoJson and WKT shape definitions are supported * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java index 559c084325abb..bb3cd34ae629d 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java +++ b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java @@ -42,7 +42,7 @@ /** * This enum is used to determine how to deal with invalid geo coordinates in geo related * queries: - * + *
<p>
                          * On STRICT validation invalid coordinates cause an exception to be thrown. * On IGNORE_MALFORMED invalid coordinates are being accepted. * On COERCE invalid coordinates are being corrected to the most likely valid coordinate. diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index b5cc3238295e0..5a57dfed14f69 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -691,7 +691,7 @@ public static class Regexp extends IntervalsSourceProvider { /** * Constructor - * + *
<p>
                          * {@code flags} is Lucene's syntax flags * and {@code caseInsensitive} enables Lucene's only matching flag. */ diff --git a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java index 9a8d2ab104799..7ceb17203e837 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -176,7 +176,7 @@ public String minimumShouldMatch() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java index 844d47070923b..5e9e6a3660e76 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java @@ -209,7 +209,7 @@ public String analyzer() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index 84b30209f31bd..e6472afef2215 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -87,7 +87,7 @@ /** * A more like this query that finds documents that are "like" the provided set of document(s). - * + *
<p>
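To make the 'like' input concrete, a sketch combining free text with a stored document Item as the javadoc here describes (index, field names and id are hypothetical):

    MoreLikeThisQueryBuilder mlt = QueryBuilders.moreLikeThisQuery(
            new String[] { "title", "body" },                      // fields to compare against
            new String[] { "distributed search internals" },       // free-text "like" input
            new MoreLikeThisQueryBuilder.Item[] { new MoreLikeThisQueryBuilder.Item("articles", "1") })
        .minTermFreq(1)
        .maxQueryTerms(12);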
                          * The documents are provided as a set of strings and/or a list of {@link Item}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 1aa013cb24d5e..6227e5d2fa806 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -398,8 +398,8 @@ public int slop() { } @Deprecated - /** - * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". + /* + Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MultiMatchQueryBuilder fuzziness(Object fuzziness) { if (fuzziness != null) { diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 33fe96597b4f8..ac4fde7f06b16 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -318,10 +318,13 @@ protected Query doToQuery(QueryShardContext context) throws IOException { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } + BitSetProducer previousParentFilter = context.getParentFilter(); try { + context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); innerQuery = this.query.toQuery(context); } finally { + context.setParentFilter(previousParentFilter); context.nestedScope().previousLevel(); } diff --git a/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java new file mode 100644 index 0000000000000..3ba13bc7a2da4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.SetOnce; + +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * Class to traverse the QueryBuilder tree and capture the query shape + */ +public final class QueryShapeVisitor implements QueryBuilderVisitor { + private final SetOnce queryType = new SetOnce<>(); + private final Map> childVisitors = new EnumMap<>(BooleanClause.Occur.class); + + @Override + public void accept(QueryBuilder qb) { + queryType.set(qb.getName()); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + // Should get called once per Occur value + if (childVisitors.containsKey(occur)) { + throw new IllegalStateException("child visitor already called for " + occur); + } + final List childVisitorList = new ArrayList<>(); + QueryBuilderVisitor childVisitorWrapper = new QueryBuilderVisitor() { + QueryShapeVisitor currentChild; + + @Override + public void accept(QueryBuilder qb) { + currentChild = new QueryShapeVisitor(); + childVisitorList.add(currentChild); + currentChild.accept(qb); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return currentChild.getChildVisitor(occur); + } + }; + childVisitors.put(occur, childVisitorList); + return childVisitorWrapper; + } + + String toJson() { + StringBuilder outputBuilder = new StringBuilder("{\"type\":\"").append(queryType.get()).append("\""); + for (Map.Entry> entry : childVisitors.entrySet()) { + outputBuilder.append(",\"").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append("\"["); + boolean first = true; + for (QueryShapeVisitor child : entry.getValue()) { + if (!first) { + outputBuilder.append(","); + } + outputBuilder.append(child.toJson()); + first = false; + } + outputBuilder.append("]"); + } + outputBuilder.append("}"); + return outputBuilder.toString(); + } + + public String prettyPrintTree(String indent) { + StringBuilder outputBuilder = new StringBuilder(indent).append(queryType.get()).append("\n"); + for (Map.Entry> entry : childVisitors.entrySet()) { + outputBuilder.append(indent).append(" ").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append(":\n"); + for (QueryShapeVisitor child : entry.getValue()) { + outputBuilder.append(child.prettyPrintTree(indent + " ")); + } + } + return outputBuilder.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 0bf05e633bba3..7b248c2a6f3c3 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -115,6 +115,7 @@ public class QueryShardContext extends QueryRewriteContext { private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; private final ValuesSourceRegistry valuesSourceRegistry; + private BitSetProducer parentFilter; public QueryShardContext( int shardId, @@ -509,7 +510,7 @@ public final void freezeContext() { /** * This method fails if {@link #freezeContext()} is called before on this * context. This is used to seal. - * + *
<p>
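Returning to the new QueryShapeVisitor introduced above, a minimal sketch of how it might be driven; this assumes the query-builder visitor traversal (QueryBuilder#visit feeding accept()/getChildVisitor()) is wired up for bool queries, and the printed shape is only indicative:

    BoolQueryBuilder query = QueryBuilders.boolQuery()
        .must(QueryBuilders.termQuery("user", "alice"))
        .filter(QueryBuilders.rangeQuery("age").gte(21));

    QueryShapeVisitor shape = new QueryShapeVisitor();
    query.visit(shape);                          // assumed traversal entry point
    System.out.println(shape.prettyPrintTree(""));
    // roughly:
    // bool
    //   must:
    //   term
    //   filter:
    //   range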
                          * This methods and all methods that call it should be final to ensure that * setting the request as not cacheable and the freezing behaviour of this * class cannot be bypassed. This is important so we can trust when this @@ -622,4 +623,12 @@ public BitsetFilterCache getBitsetFilterCache() { public AggregationUsageService getUsageService() { return valuesSourceRegistry.getUsageService(); } + + public BitSetProducer getParentFilter() { + return parentFilter; + } + + public void setParentFilter(BitSetProducer parentFilter) { + this.parentFilter = parentFilter; + } } diff --git a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java index 469dc51da323a..3d8fbd5fc436d 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java @@ -119,7 +119,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder * Can be changed back to HashMap once https://issues.apache.org/jira/browse/LUCENE-6305 is fixed. */ private final Map fieldsAndWeights = new TreeMap<>(); diff --git a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java index 0d54373112904..fdbef2c732361 100644 --- a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java @@ -504,9 +504,9 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override protected Query doToQuery(QueryShardContext context) throws IOException { if (from == null && to == null) { - /** - * Open bounds on both side, we can rewrite to an exists query - * if the {@link FieldNamesFieldMapper} is enabled. + /* + Open bounds on both side, we can rewrite to an exists query + if the {@link FieldNamesFieldMapper} is enabled. */ final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context .getMapperService() diff --git a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java index 00758309fc0f0..598406b4e45f2 100644 --- a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java +++ b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java @@ -73,7 +73,7 @@ public SearchIndexNameMatcher( /** * Given an index pattern, checks whether it matches against the current shard. - * + *
<p>
                          * If this shard represents a remote shard target, then in order to match the pattern contain * the separator ':', and must match on both the cluster alias and index name. */ diff --git a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java index 3cd0554af49a4..57ae7dd0ea5e9 100644 --- a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java @@ -66,7 +66,7 @@ *

                        • '{@code -}' negates a single token: {@code -token0} *
                        • '{@code "}' creates phrases of terms: {@code "term1 term2 ..."} *
                        • '{@code *}' at the end of terms specifies prefix query: {@code term*} - *
                        • '{@code (}' and '{@code)}' specifies precedence: {@code token1 + (token2 | token3)} + *
                        • '{@code (}' and '{@code )}' specifies precedence: {@code token1 + (token2 | token3)} *
                        • '{@code ~}N' at the end of terms specifies fuzzy query: {@code term~1} *
                        • '{@code ~}N' at the end of phrases specifies near/slop query: {@code "term1 term2"~5} *
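Putting the syntax above together, a small usage sketch (field name and query text are arbitrary):

    // '+' and '|' combine terms, '"' quotes a phrase, '-' negates, '(' ')' group:
    SimpleQueryStringBuilder query = QueryBuilders.simpleQueryStringQuery("\"fried eggs\" +(eggplant | potato) -frittata")
        .field("title")
        .defaultOperator(Operator.AND);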
                        diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 94fd08b99ac58..aebd7d2ea201a 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -10,15 +10,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataCreateIndexService; import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; @@ -27,6 +28,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; @@ -38,15 +40,15 @@ import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Set; +import java.util.Optional; import java.util.function.Function; import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for restoring index data from remote store @@ -136,16 +138,22 @@ public RemoteRestoreResult restore( String[] indexNames ) { Map> indexMetadataMap = new HashMap<>(); + Metadata remoteMetadata = null; boolean metadataFromRemoteStore = (restoreClusterUUID == null || restoreClusterUUID.isEmpty() || restoreClusterUUID.isBlank()) == false; if (metadataFromRemoteStore) { try { - remoteClusterStateService.getLatestIndexMetadata(currentState.getClusterName().value(), restoreClusterUUID) - .values() - .forEach(indexMetadata -> { - indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); - }); + // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to + // restore + if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { + throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); + } + logger.info("Restoring cluster state from remote store from cluster UUID : [{}]", restoreClusterUUID); + remoteMetadata = remoteClusterStateService.getLatestMetadata(currentState.getClusterName().value(), restoreClusterUUID); + remoteMetadata.getIndices().values().forEach(indexMetadata -> { + indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); + }); } catch (Exception e) { throw new 
IllegalStateException("Unable to restore remote index metadata", e); } @@ -154,13 +162,22 @@ public RemoteRestoreResult restore( IndexMetadata indexMetadata = currentState.metadata().index(indexName); if (indexMetadata == null) { logger.warn("Index restore is not supported for non-existent index. Skipping: {}", indexName); + } else if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) == false) { + logger.warn("Remote store is not enabled for index: {}", indexName); + } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "cannot restore index [%s] because an open index with same name/uuid already exists in the cluster.", + indexName + ) + " Close the existing index." + ); } else { indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); } } } - validate(currentState, indexMetadataMap, restoreClusterUUID, restoreAllShards); - return executeRestore(currentState, indexMetadataMap, restoreAllShards); + return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteMetadata); } /** @@ -173,11 +190,13 @@ public RemoteRestoreResult restore( private RemoteRestoreResult executeRestore( ClusterState currentState, Map> indexMetadataMap, - boolean restoreAllShards + boolean restoreAllShards, + Metadata remoteMetadata ) { final String restoreUUID = UUIDs.randomBase64UUID(); List indicesToBeRestored = new ArrayList<>(); int totalShards = 0; + boolean metadataFromRemoteStore = false; ClusterState.Builder builder = ClusterState.builder(currentState); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); @@ -185,7 +204,7 @@ private RemoteRestoreResult executeRestore( for (Map.Entry> indexMetadataEntry : indexMetadataMap.entrySet()) { String indexName = indexMetadataEntry.getKey(); IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); - boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); + metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); IndexMetadata updatedIndexMetadata = indexMetadata; if (metadataFromRemoteStore == false && restoreAllShards) { updatedIndexMetadata = IndexMetadata.builder(indexMetadata) @@ -199,117 +218,71 @@ private RemoteRestoreResult executeRestore( IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); - Map indexShardRoutingTableMap = new HashMap<>(); if (metadataFromRemoteStore == false) { - indexShardRoutingTableMap = currentState.routingTable() + Map indexShardRoutingTableMap = currentState.routingTable() .index(indexName) .shards() .values() .stream() .collect(Collectors.toMap(IndexShardRoutingTable::shardId, Function.identity())); + + RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( + restoreUUID, + updatedIndexMetadata.getCreationVersion(), + indexId + ); + + rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, indexShardRoutingTableMap, restoreAllShards); } - RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( - restoreUUID, - updatedIndexMetadata.getCreationVersion(), - indexId - ); - rtBuilder.addAsRemoteStoreRestore( - updatedIndexMetadata, - recoverySource, - indexShardRoutingTableMap, - restoreAllShards || metadataFromRemoteStore - ); blocks.updateBlocks(updatedIndexMetadata); mdBuilder.put(updatedIndexMetadata, 
true); indicesToBeRestored.add(indexName); totalShards += updatedIndexMetadata.getNumberOfShards(); } + if (remoteMetadata != null) { + restoreGlobalMetadata(mdBuilder, remoteMetadata); + } + RestoreInfo restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); RoutingTable rt = rtBuilder.build(); ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); - return RemoteRestoreResult.build(restoreUUID, restoreInfo, allocationService.reroute(updatedState, "restored from remote store")); + if (metadataFromRemoteStore == false) { + updatedState = allocationService.reroute(updatedState, "restored from remote store"); + } + return RemoteRestoreResult.build(restoreUUID, restoreInfo, updatedState); } - /** - * Performs various validations needed before executing restore - * @param currentState current cluster state - * @param indexMetadataMap map of index metadata to restore - * @param restoreClusterUUID cluster UUID used to restore IndexMetadata - * @param restoreAllShards indicates if all shards of the index needs to be restored. This flat is ignored if remoteClusterUUID is provided - */ - private void validate( - ClusterState currentState, - Map> indexMetadataMap, - @Nullable String restoreClusterUUID, - boolean restoreAllShards - ) throws IllegalStateException, IllegalArgumentException { - String errorMsg = "cannot restore index [%s] because an open index with same name/uuid already exists in the cluster."; - - // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to - // restore - if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { - throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); + private void restoreGlobalMetadata(Metadata.Builder mdBuilder, Metadata remoteMetadata) { + if (remoteMetadata.persistentSettings() != null) { + Settings settings = remoteMetadata.persistentSettings(); + clusterService.getClusterSettings().validateUpdate(settings); + mdBuilder.persistentSettings(settings); } - for (Map.Entry> indexMetadataEntry : indexMetadataMap.entrySet()) { - String indexName = indexMetadataEntry.getKey(); - IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); - String indexUUID = indexMetadata.getIndexUUID(); - boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); - if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { - if (metadataFromRemoteStore) { - Set graveyardIndexNames = new HashSet<>(); - Set graveyardIndexUUID = new HashSet<>(); - Set liveClusterIndexUUIDs = currentState.metadata() - .indices() - .values() - .stream() - .map(IndexMetadata::getIndexUUID) - .collect(Collectors.toSet()); - - currentState.metadata().indexGraveyard().getTombstones().forEach(tombstone -> { - graveyardIndexNames.add(tombstone.getIndex().getName()); - graveyardIndexUUID.add(tombstone.getIndex().getUUID()); - }); - - // Since updates to graveyard are synced to remote we should neven land in a situation where remote contain index - // metadata for graveyard index. 
- assert graveyardIndexNames.contains(indexName) == false : String.format( - Locale.ROOT, - "Index name [%s] exists in graveyard!", - indexName - ); - assert graveyardIndexUUID.contains(indexUUID) == false : String.format( - Locale.ROOT, - "Index UUID [%s] exists in graveyard!", - indexUUID - ); - - // Any indices being restored from remote cluster state should not already be part of the cluster as this causes - // conflict - boolean sameNameIndexExists = currentState.metadata().hasIndex(indexName); - boolean sameUUIDIndexExists = liveClusterIndexUUIDs.contains(indexUUID); - if (sameNameIndexExists || sameUUIDIndexExists) { - String finalErrorMsg = String.format(Locale.ROOT, errorMsg, indexName); - logger.info(finalErrorMsg); - throw new IllegalStateException(finalErrorMsg); - } - - Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion(); - metadataIndexUpgradeService.upgradeIndexMetadata(indexMetadata, minIndexCompatibilityVersion); - boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(indexMetadata.getSettings()); - createIndexService.validateIndexName(indexName, currentState); - createIndexService.validateDotIndex(indexName, isHidden); - shardLimitValidator.validateShardLimit(indexName, indexMetadata.getSettings(), currentState); - } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { - throw new IllegalStateException(String.format(Locale.ROOT, errorMsg, indexName) + " Close the existing index."); + if (remoteMetadata.templates() != null) { + for (final IndexTemplateMetadata cursor : remoteMetadata.templates().values()) { + mdBuilder.put(cursor); + } + } + if (remoteMetadata.customs() != null) { + for (final Map.Entry cursor : remoteMetadata.customs().entrySet()) { + if (RepositoriesMetadata.TYPE.equals(cursor.getKey()) == false) { + mdBuilder.putCustom(cursor.getKey(), cursor.getValue()); } - } else { - logger.warn("Remote store is not enabled for index: {}", indexName); } } + Optional repositoriesMetadata = Optional.ofNullable(remoteMetadata.custom(RepositoriesMetadata.TYPE)); + repositoriesMetadata = repositoriesMetadata.map( + repositoriesMetadata1 -> new RepositoriesMetadata( + repositoriesMetadata1.repositories() + .stream() + .filter(repository -> SYSTEM_REPOSITORY_SETTING.get(repository.settings()) == false) + .collect(Collectors.toList()) + ) + ); + repositoriesMetadata.ifPresent(metadata -> mdBuilder.putCustom(RepositoriesMetadata.TYPE, metadata)); } /** diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java index f33800659245f..d7c0da4773fff 100644 --- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java @@ -73,13 +73,13 @@ /** * Task storing information about a currently running BulkByScroll request. - * + *
<p>
                        * When the request is not sliced, this task is the only task created, and starts an action to perform search requests. - * + *
<p>
                        * When the request is sliced, this task can either represent a coordinating task (using * {@link BulkByScrollTask#setWorkerCount(int)}) or a worker task that performs search queries (using * {@link BulkByScrollTask#setWorker(float, Integer)}). - * + *
<p>
                        * We don't always know if this task will be a leader or worker task when it's created, because if slices is set to "auto" it may * be either depending on the number of shards in the source indices. We figure that out when the request is handled and set it on this * class with {@link #setWorkerCount(int)} or {@link #setWorker(float, Integer)}. diff --git a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java index aff9ec1f20e46..4963080f5916c 100644 --- a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java @@ -49,7 +49,7 @@ /** * Creates a new {@link DeleteByQueryRequest} that uses scrolling and bulk requests to delete all documents matching * the query. This can have performance as well as visibility implications. - * + *
<p>
                        * Delete-by-query now has the following semantics: *

                          *
                        • it's {@code non-atomic}, a delete-by-query may fail at any time while some documents matching the query have already been diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index c7863536adf20..4228ec60c4524 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -75,6 +76,10 @@ public class RemoteSegmentStats implements Writeable, ToXContentFragment { * Total time spent in downloading segments from remote store */ private long totalDownloadTime; + /** + * Total rejections due to remote store upload backpressure + */ + private long totalRejections; public RemoteSegmentStats() {} @@ -90,6 +95,9 @@ public RemoteSegmentStats(StreamInput in) throws IOException { totalRefreshBytesLag = in.readLong(); totalUploadTime = in.readLong(); totalDownloadTime = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + totalRejections = in.readVLong(); + } } /** @@ -115,6 +123,7 @@ public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) { this.totalRefreshBytesLag = trackerStats.bytesLag; this.totalUploadTime = trackerStats.totalUploadTimeInMs; this.totalDownloadTime = trackerStats.directoryFileTransferTrackerStats.totalTransferTimeInMs; + this.totalRejections = trackerStats.rejectionCount; } // Getter and setters. All are visible for testing @@ -207,6 +216,14 @@ public void addTotalDownloadTime(long totalDownloadTime) { this.totalDownloadTime += totalDownloadTime; } + public long getTotalRejections() { + return totalRejections; + } + + public void addTotalRejections(long totalRejections) { + this.totalRejections += totalRejections; + } + /** * Adds existing stats. 
Used for stats roll-ups at index or node level * @@ -225,6 +242,7 @@ public void add(RemoteSegmentStats existingStats) { this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag(); this.totalUploadTime += existingStats.getTotalUploadTime(); this.totalDownloadTime += existingStats.getTotalDownloadTime(); + this.totalRejections += existingStats.totalRejections; } } @@ -241,18 +259,25 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalRefreshBytesLag); out.writeLong(totalUploadTime); out.writeLong(totalDownloadTime); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeVLong(totalRejections); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REMOTE_STORE); + builder.startObject(Fields.UPLOAD); buildUploadStats(builder); - builder.endObject(); + builder.endObject(); // UPLOAD + builder.startObject(Fields.DOWNLOAD); buildDownloadStats(builder); - builder.endObject(); - builder.endObject(); + builder.endObject(); // DOWNLOAD + + builder.endObject(); // REMOTE_STORE + return builder; } @@ -261,13 +286,19 @@ private void buildUploadStats(XContentBuilder builder) throws IOException { builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(uploadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(uploadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(uploadBytesFailed)); - builder.endObject(); + builder.endObject(); // TOTAL_UPLOAD_SIZE + builder.startObject(Fields.REFRESH_SIZE_LAG); builder.humanReadableField(Fields.TOTAL_BYTES, Fields.TOTAL, new ByteSizeValue(totalRefreshBytesLag)); builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag)); - builder.endObject(); + builder.endObject(); // REFRESH_SIZE_LAG + builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalUploadTime)); + + builder.startObject(Fields.PRESSURE); + builder.field(Fields.TOTAL_REJECTIONS, totalRejections); + builder.endObject(); // PRESSURE } private void buildDownloadStats(XContentBuilder builder) throws IOException { @@ -300,6 +331,8 @@ static final class Fields { static final String MAX_BYTES = "max_bytes"; static final String TOTAL_TIME_SPENT = "total_time_spent"; static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis"; + static final String PRESSURE = "pressure"; + static final String TOTAL_REJECTIONS = "total_rejections"; } @Override @@ -318,7 +351,8 @@ public boolean equals(Object o) { && maxRefreshBytesLag == that.maxRefreshBytesLag && totalRefreshBytesLag == that.totalRefreshBytesLag && totalUploadTime == that.totalUploadTime - && totalDownloadTime == that.totalDownloadTime; + && totalDownloadTime == that.totalDownloadTime + && totalRejections == that.totalRejections; } @Override @@ -334,7 +368,8 @@ public int hashCode() { maxRefreshBytesLag, totalRefreshBytesLag, totalUploadTime, - totalDownloadTime + totalDownloadTime, + totalRejections ); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index 05081180bb179..fb65d9ef83be2 100644 --- 
a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -66,6 +67,12 @@ public class RemoteSegmentTransferTracker extends RemoteTransferTracker { */ private volatile long remoteRefreshTimeMs; + /** + * This is the time of first local refresh after the last successful remote refresh. When the remote store is in + * sync with local refresh, this will be reset to -1. + */ + private volatile long remoteRefreshStartTimeMs = -1; + /** * The refresh time(clock) of most recent remote refresh. */ @@ -76,11 +83,6 @@ public class RemoteSegmentTransferTracker extends RemoteTransferTracker { */ private volatile long refreshSeqNoLag; - /** - * Keeps the time (ms) lag computed so that we do not compute it for every request. - */ - private volatile long timeMsLag; - /** * Keeps track of the total bytes of segment files which were uploaded to remote store during last successful remote refresh */ @@ -132,14 +134,19 @@ public RemoteSegmentTransferTracker( logger = Loggers.getLogger(getClass(), shardId); // Both the local refresh time and remote refresh time are set with current time to give consistent view of time lag when it arises. long currentClockTimeMs = System.currentTimeMillis(); - long currentTimeMs = System.nanoTime() / 1_000_000L; + long currentTimeMs = currentTimeMsUsingSystemNanos(); localRefreshTimeMs = currentTimeMs; remoteRefreshTimeMs = currentTimeMs; + remoteRefreshStartTimeMs = currentTimeMs; localRefreshClockTimeMs = currentClockTimeMs; remoteRefreshClockTimeMs = currentClockTimeMs; this.directoryFileTransferTracker = directoryFileTransferTracker; } + public static long currentTimeMsUsingSystemNanos() { + return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + } + @Override public void incrementTotalUploadsFailed() { super.incrementTotalUploadsFailed(); @@ -180,19 +187,22 @@ public long getLocalRefreshClockTimeMs() { */ public void updateLocalRefreshTimeAndSeqNo() { updateLocalRefreshClockTimeMs(System.currentTimeMillis()); - updateLocalRefreshTimeMs(System.nanoTime() / 1_000_000L); + updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); updateLocalRefreshSeqNo(getLocalRefreshSeqNo() + 1); } // Visible for testing - void updateLocalRefreshTimeMs(long localRefreshTimeMs) { + synchronized void updateLocalRefreshTimeMs(long localRefreshTimeMs) { assert localRefreshTimeMs >= this.localRefreshTimeMs : "newLocalRefreshTimeMs=" + localRefreshTimeMs + " < " + "currentLocalRefreshTimeMs=" + this.localRefreshTimeMs; + boolean isRemoteInSyncBeforeLocalRefresh = this.localRefreshTimeMs == this.remoteRefreshTimeMs; this.localRefreshTimeMs = localRefreshTimeMs; - computeTimeMsLag(); + if (isRemoteInSyncBeforeLocalRefresh) { + this.remoteRefreshStartTimeMs = localRefreshTimeMs; + } } private void updateLocalRefreshClockTimeMs(long localRefreshClockTimeMs) { @@ -221,14 +231,18 @@ long getRemoteRefreshClockTimeMs() { return remoteRefreshClockTimeMs; } - public void updateRemoteRefreshTimeMs(long remoteRefreshTimeMs) { - assert remoteRefreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" - + remoteRefreshTimeMs + public synchronized void updateRemoteRefreshTimeMs(long refreshTimeMs) { + assert refreshTimeMs >= this.remoteRefreshTimeMs : 
"newRemoteRefreshTimeMs=" + + refreshTimeMs + " < " + "currentRemoteRefreshTimeMs=" + this.remoteRefreshTimeMs; - this.remoteRefreshTimeMs = remoteRefreshTimeMs; - computeTimeMsLag(); + this.remoteRefreshTimeMs = refreshTimeMs; + // When multiple refreshes have failed, there is a possibility that retry is ongoing while another refresh gets + // triggered. After the segments have been uploaded and before the below code runs, the updateLocalRefreshTimeAndSeqNo + // method is triggered, which will update the local localRefreshTimeMs. Now, the lag would basically become the + // time since the last refresh happened locally. + this.remoteRefreshStartTimeMs = refreshTimeMs == this.localRefreshTimeMs ? -1 : this.localRefreshTimeMs; } public void updateRemoteRefreshClockTimeMs(long remoteRefreshClockTimeMs) { @@ -243,12 +257,11 @@ public long getRefreshSeqNoLag() { return refreshSeqNoLag; } - private void computeTimeMsLag() { - timeMsLag = localRefreshTimeMs - remoteRefreshTimeMs; - } - public long getTimeMsLag() { - return timeMsLag; + if (remoteRefreshTimeMs == localRefreshTimeMs) { + return 0; + } + return currentTimeMsUsingSystemNanos() - remoteRefreshStartTimeMs; } public long getBytesLag() { @@ -267,7 +280,8 @@ public long getRejectionCount() { return rejectionCount.get(); } - void incrementRejectionCount() { + /** public only for testing **/ + public void incrementRejectionCount() { rejectionCount.incrementAndGet(); } @@ -353,7 +367,7 @@ public RemoteSegmentTransferTracker.Stats stats() { shardId, localRefreshClockTimeMs, remoteRefreshClockTimeMs, - timeMsLag, + getTimeMsLag(), localRefreshSeqNo, remoteRefreshSeqNo, uploadBytesStarted.get(), diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java index 2920b33921869..33cd40f802d43 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java @@ -180,7 +180,6 @@ public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId sh return true; } if (pressureTracker.isUploadTimeMovingAverageReady() == false) { - logger.trace("upload time moving average is not ready"); return true; } long timeLag = pressureTracker.getTimeMsLag(); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java index 864fe24c282a2..e66aa3444c214 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java @@ -30,7 +30,7 @@ static class Defaults { public static final Setting REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED = Setting.boolSetting( "remote_store.segment.pressure.enabled", - false, + true, Setting.Property.Dynamic, Setting.Property.NodeScope ); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 114d07589b0c0..b4c33d781af86 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -8,7 +8,13 @@ package org.opensearch.index.remote; +import org.opensearch.common.collect.Tuple; + import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import 
java.util.Map; +import java.util.function.Function; /** * Utils for remote store @@ -69,4 +75,30 @@ public static String getSegmentName(String filename) { return filename.substring(0, endIdx); } + + /** + * + * @param mdFiles List of segment/translog metadata files + * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . + * fn returns null if node id is not part of the file name + */ + public static void verifyNoMultipleWriters(List mdFiles, Function> fn) { + Map nodesByPrimaryTermAndGen = new HashMap<>(); + mdFiles.forEach(mdFile -> { + Tuple nodeIdByPrimaryTermAndGen = fn.apply(mdFile); + if (nodeIdByPrimaryTermAndGen != null) { + if (nodesByPrimaryTermAndGen.containsKey(nodeIdByPrimaryTermAndGen.v1()) + && (!nodesByPrimaryTermAndGen.get(nodeIdByPrimaryTermAndGen.v1()).equals(nodeIdByPrimaryTermAndGen.v2()))) { + throw new IllegalStateException( + "Multiple metadata files from different nodes" + + "having same primary term and generations " + + nodeIdByPrimaryTermAndGen.v1() + + " detected " + ); + } + nodesByPrimaryTermAndGen.put(nodeIdByPrimaryTermAndGen.v1(), nodeIdByPrimaryTermAndGen.v2()); + } + }); + } + } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java index 1a9896540212e..4214a87049350 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java @@ -232,6 +232,63 @@ public RemoteTranslogTransferTracker.Stats stats() { ); } + @Override + public String toString() { + return "RemoteTranslogTransferStats{" + + "lastSuccessfulUploadTimestamp=" + + lastSuccessfulUploadTimestamp.get() + + "," + + "totalUploadsStarted=" + + totalUploadsStarted.get() + + "," + + "totalUploadsSucceeded=" + + totalUploadsSucceeded.get() + + "," + + "totalUploadsFailed=" + + totalUploadsFailed.get() + + "," + + "uploadBytesStarted=" + + uploadBytesStarted.get() + + "," + + "uploadBytesFailed=" + + uploadBytesFailed.get() + + "," + + "totalUploadTimeInMillis=" + + totalUploadTimeInMillis.get() + + "," + + "uploadBytesMovingAverage=" + + uploadBytesMovingAverageReference.get().getAverage() + + "," + + "uploadBytesPerSecMovingAverage=" + + uploadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "uploadTimeMovingAverage=" + + uploadTimeMsMovingAverageReference.get().getAverage() + + "," + + "lastSuccessfulDownloadTimestamp=" + + lastSuccessfulDownloadTimestamp.get() + + "," + + "totalDownloadsSucceeded=" + + totalDownloadsSucceeded.get() + + "," + + "downloadBytesSucceeded=" + + downloadBytesSucceeded.get() + + "," + + "totalDownloadTimeInMillis=" + + totalDownloadTimeInMillis.get() + + "," + + "downloadBytesMovingAverage=" + + downloadBytesMovingAverageReference.get().getAverage() + + "," + + "downloadBytesPerSecMovingAverage=" + + downloadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "downloadTimeMovingAverage=" + + downloadTimeMsMovingAverageReference.get().getAverage() + + "," + + "}"; + } + /** * Represents the tracker's state as seen in the stats API. 
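For reference, the RemoteSegmentTransferTracker change above stops caching a precomputed timeMsLag and derives the lag on demand instead: a remoteRefreshStartTimeMs marker is set on the first local refresh after local and remote were in sync, cleared (-1) once the remote store catches up, and the reported lag is the monotonic-clock time elapsed since that marker (0 when in sync). A minimal standalone sketch of that bookkeeping, with illustrative names rather than the actual OpenSearch types:

import java.util.concurrent.TimeUnit;

// Illustrative model of the lag bookkeeping introduced above; not the actual OpenSearch class.
class RefreshLagTracker {
    private volatile long localRefreshTimeMs;
    private volatile long remoteRefreshTimeMs;
    private volatile long remoteRefreshStartTimeMs = -1;

    RefreshLagTracker() {
        long now = currentTimeMsUsingSystemNanos();
        localRefreshTimeMs = now;
        remoteRefreshTimeMs = now;
        remoteRefreshStartTimeMs = now;
    }

    static long currentTimeMsUsingSystemNanos() {
        // Monotonic source, so the lag cannot jump if the wall clock is adjusted.
        return TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    }

    synchronized void onLocalRefresh(long timeMs) {
        // The first local refresh after remote was in sync opens the lag window.
        if (localRefreshTimeMs == remoteRefreshTimeMs) {
            remoteRefreshStartTimeMs = timeMs;
        }
        localRefreshTimeMs = timeMs;
    }

    synchronized void onRemoteRefresh(long timeMs) {
        remoteRefreshTimeMs = timeMs;
        // Caught up: clear the marker; otherwise keep measuring from the still-pending local refresh.
        remoteRefreshStartTimeMs = (timeMs == localRefreshTimeMs) ? -1 : localRefreshTimeMs;
    }

    long timeMsLag() {
        if (remoteRefreshTimeMs == localRefreshTimeMs) {
            return 0;
        }
        return currentTimeMsUsingSystemNanos() - remoteRefreshStartTimeMs;
    }
}

Computing the lag from the monotonic clock at read time, rather than storing a fixed delta, means a stalled upload shows a lag that keeps growing between refreshes, which is the value the pressure service validates against.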
* diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java index ade4fb1e69586..9e2b79971369d 100644 --- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java @@ -753,9 +753,9 @@ private Query analyzeGraphBoolean(String field, TokenStream source, BooleanClaus lastState = end; final Query queryPos; boolean usePrefix = isPrefix && end == -1; - /** - * check if the GraphTokenStreamFiniteStrings graph is empty - * return empty BooleanQuery result + /* + check if the GraphTokenStreamFiniteStrings graph is empty + return empty BooleanQuery result */ Iterator graphIt = graph.getFiniteStrings(); if (!graphIt.hasNext()) { diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 14aaf7e58a59c..1f9144b28f286 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -242,7 +242,7 @@ private Stats(StreamInput in) throws IOException { pitCurrent = in.readVLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { this.requestStatsLongHolder = new RequestStatsLongHolder(); requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); } @@ -437,7 +437,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitCurrent); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { if (requestStatsLongHolder == null) { requestStatsLongHolder = new RequestStatsLongHolder(); } diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index 0a4c197898d3d..155866e20d007 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -147,7 +147,7 @@ public synchronized void markSeqNoAsPersisted(final long seqNo) { /** * Updates the processed sequence checkpoint to the given value. - * + *
<p>
                          * This method is only used for segment replication since indexing doesn't * take place on the replica allowing us to avoid the check that all sequence numbers * are consecutively processed. @@ -208,7 +208,7 @@ public long getMaxSeqNo() { /** * constructs a {@link SeqNoStats} object, using local state and the supplied global checkpoint - * + *
<p>
                          * This is needed to make sure the persisted local checkpoint and max seq no are consistent */ public synchronized SeqNoStats getStats(final long globalCheckpoint) { diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 4b6d72b86ff62..94f376d923689 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -88,7 +88,7 @@ /** * This class is responsible for tracking the replication group with its progress and safety markers (local and global checkpoints). - * + *
<p>
                          * The global checkpoint is the highest sequence number for which all lower (or equal) sequence number have been processed * on all shards that are currently active. Since shards count as "active" when the cluster-manager starts * them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. These shards @@ -112,10 +112,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * checkpoint based on the local checkpoints of all in-sync shard copies. * - replica: this shard receives global checkpoint information from the primary (see * {@link #updateGlobalCheckpointOnReplica(long, String)}). - * + *
<p>
                          * When a shard is initialized (be it a primary or replica), it initially operates in replica mode. The global checkpoint tracker is * then switched to primary mode in the following three scenarios: - * + *
<p>
                          * - An initializing primary shard that is not a relocation target is moved to primary mode (using {@link #activatePrimaryMode}) once * the shard becomes active. * - An active replica shard is moved to primary mode (using {@link #activatePrimaryMode}) once it is promoted to primary. @@ -140,7 +140,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * in-sync shard copies cannot grow, otherwise the relocation target might miss this information and increase the global checkpoint * to eagerly. As consequence, some of the methods in this class are not allowed to be called while a handoff is in progress, * in particular {@link #markAllocationIdAsInSync}. - * + *
<p>
                          * A notable exception to this is the method {@link #updateFromClusterManager}, which is still allowed to be called during a relocation handoff. * The reason for this is that the handoff might fail and can be aborted (using {@link #abortRelocationHandoff}), in which case * it is important that the global checkpoint tracker does not miss any state updates that might happened during the handoff attempt. @@ -1163,7 +1163,7 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI /** * Update the local knowledge of the visible checkpoint for the specified allocation ID. - * + *
<p>
                          * This method will also stop timers for each shard and compute replication lag metrics. * * @param allocationId the allocation ID to update the global checkpoint for diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java index f74fc7eefe65c..ca3c7e1d49700 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java @@ -62,6 +62,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -99,7 +100,8 @@ public RetentionLeaseSyncAction( final ShardStateAction shardStateAction, final ActionFilters actionFilters, final IndexingPressureService indexingPressureService, - final SystemIndices systemIndices + final SystemIndices systemIndices, + final Tracer tracer ) { super( settings, @@ -115,7 +117,8 @@ public RetentionLeaseSyncAction( ignore -> ThreadPool.Names.MANAGEMENT, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java index d1cb396f55d0f..3017c3ce6dcff 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java @@ -155,7 +155,7 @@ default void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {} /** * Called after the index shard has been deleted from disk. - * + *
<p>
                          * Note: this method is only called if the deletion of the shard did finish without an exception * * @param shardId The shard id diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d476e8b7c9288..5b6257084e440 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -62,8 +62,6 @@ import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; -import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.DataStream; @@ -162,6 +160,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteStoreFileDownloader; import org.opensearch.index.store.Store; import org.opensearch.index.store.Store.MetadataSnapshot; import org.opensearch.index.store.StoreFileMetadata; @@ -183,6 +182,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -200,7 +200,6 @@ import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -341,6 +340,7 @@ Runnable getGlobalCheckpointSyncer() { private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; private final List internalRefreshListener = new ArrayList<>(); + private final RemoteStoreFileDownloader fileDownloader; public IndexShard( final ShardRouting shardRouting, @@ -367,7 +367,9 @@ public IndexShard( @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final Supplier clusterRemoteTranslogBufferIntervalSupplier + final Supplier clusterRemoteTranslogBufferIntervalSupplier, + final String nodeId, + final RecoverySettings recoverySettings ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -413,7 +415,7 @@ public IndexShard( logger.debug("state: [CREATED]"); this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); - this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays); + this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId); final String aId = shardRouting.allocationId().getId(); final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id()); this.pendingPrimaryTerm = primaryTerm; @@ -463,6 +465,7 
@@ public boolean shouldCache(Query query) { ? false : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); } public ThreadPool getThreadPool() { @@ -556,6 +559,14 @@ protected RemoteStoreStatsTrackerFactory getRemoteStoreStatsTrackerFactory() { return remoteStoreStatsTrackerFactory; } + public String getNodeId() { + return translogConfig.getNodeId(); + } + + public RemoteStoreFileDownloader getFileDownloader() { + return fileDownloader; + } + @Override public void updateShardState( final ShardRouting newRouting, @@ -629,7 +640,7 @@ public void updateShardState( if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) { // the cluster-manager started a recovering primary, activate primary mode. replicationTracker.activatePrimaryMode(getLocalCheckpoint()); - ensurePeerRecoveryRetentionLeasesExist(); + postActivatePrimaryMode(); } } else { assert currentRouting.primary() == false : "term is only increased as part of primary promotion"; @@ -700,8 +711,7 @@ public void updateShardState( // are brought up to date. checkpointPublisher.publish(this, getLatestReplicationCheckpoint()); } - - ensurePeerRecoveryRetentionLeasesExist(); + postActivatePrimaryMode(); /* * If this shard was serving as a replica shard when another shard was promoted to primary then * its Lucene index was reset during the primary term transition. In particular, the Lucene index @@ -1383,7 +1393,9 @@ public MergeStats mergeStats() { if (engine == null) { return new MergeStats(); } - return engine.getMergeStats(); + final MergeStats mergeStats = engine.getMergeStats(); + mergeStats.addUnreferencedFileCleanUpStats(engine.unreferencedFileCleanUpsPerformed()); + return mergeStats; } public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { @@ -1451,6 +1463,9 @@ public void flush(FlushRequest request) { * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details */ public void trimTranslog() { + if (isRemoteTranslogEnabled()) { + return; + } verifyNotClosed(); final Engine engine = getEngine(); engine.translogManager().trimUnreferencedTranslogFiles(); @@ -1459,7 +1474,7 @@ public void trimTranslog() { /** * Rolls the tranlog generation and cleans unneeded. */ - public void rollTranslogGeneration() { + public void rollTranslogGeneration() throws IOException { final Engine engine = getEngine(); engine.translogManager().rollTranslogGeneration(); } @@ -1592,8 +1607,11 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti } /** - * Compute and return the latest ReplicationCheckpoint for a particular shard. - * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices + * return the most recently computed ReplicationCheckpoint for a particular shard. + * The checkpoint is updated inside a refresh listener and may lag behind the SegmentInfos on the reader. + * To guarantee the checkpoint is upto date with the latest on-reader infos, use `getLatestSegmentInfosAndCheckpoint` instead. + * + * @return {@link ReplicationCheckpoint} - The most recently computed ReplicationCheckpoint. 
*/ public ReplicationCheckpoint getLatestReplicationCheckpoint() { return replicationTracker.getLatestReplicationCheckpoint(); @@ -1602,7 +1620,7 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { /** * Compute and return the latest ReplicationCheckpoint for a shard and a GatedCloseable containing the corresponding SegmentInfos. * The segments referenced by the SegmentInfos will remain on disk until the GatedCloseable is closed. - * + *
<p>
                          * Primary shards compute the seqNo used in the replication checkpoint from the fetched SegmentInfos. * Replica shards compute the seqNo from its latest processed checkpoint, which only increases when refreshing on new segments. * @@ -1612,34 +1630,12 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { public Tuple, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() { assert indexSettings.isSegRepEnabled(); - Tuple, ReplicationCheckpoint> nullSegmentInfosEmptyCheckpoint = new Tuple<>( - new GatedCloseable<>(null, () -> {}), - getLatestReplicationCheckpoint() - ); - - if (getEngineOrNull() == null) { - return nullSegmentInfosEmptyCheckpoint; - } // do not close the snapshot - caller will close it. GatedCloseable snapshot = null; try { snapshot = getSegmentInfosSnapshot(); - if (snapshot.get() != null) { - SegmentInfos segmentInfos = snapshot.get(); - final Map metadataMap = store.getSegmentMetadataMap(segmentInfos); - return new Tuple<>( - snapshot, - new ReplicationCheckpoint( - this.shardId, - getOperationPrimaryTerm(), - segmentInfos.getGeneration(), - segmentInfos.getVersion(), - metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(), - getEngine().config().getCodec().getName(), - metadataMap - ) - ); - } + final SegmentInfos segmentInfos = snapshot.get(); + return new Tuple<>(snapshot, computeReplicationCheckpoint(segmentInfos)); } catch (IOException | AlreadyClosedException e) { logger.error("Error Fetching SegmentInfos and latest checkpoint", e); if (snapshot != null) { @@ -1650,7 +1646,39 @@ public Tuple, ReplicationCheckpoint> getLatestSegme } } } - return nullSegmentInfosEmptyCheckpoint; + return new Tuple<>(new GatedCloseable<>(null, () -> {}), getLatestReplicationCheckpoint()); + } + + /** + * Compute the latest {@link ReplicationCheckpoint} from a SegmentInfos. + * This function fetches a metadata snapshot from the store that comes with an IO cost. + * We will reuse the existing stored checkpoint if it is at the same SI version. + * + * @param segmentInfos {@link SegmentInfos} infos to use to compute. + * @return {@link ReplicationCheckpoint} Checkpoint computed from the infos. + * @throws IOException When there is an error computing segment metadata from the store. 
+ */ + ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) throws IOException { + if (segmentInfos == null) { + return ReplicationCheckpoint.empty(shardId); + } + final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint(); + if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion() + && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration()) { + return latestReplicationCheckpoint; + } + final Map metadataMap = store.getSegmentMetadataMap(segmentInfos); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + this.shardId, + getOperationPrimaryTerm(), + segmentInfos.getGeneration(), + segmentInfos.getVersion(), + metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(), + getEngine().config().getCodec().getName(), + metadataMap + ); + logger.trace("Recomputed ReplicationCheckpoint for shard {}", checkpoint); + return checkpoint; } /** @@ -2300,7 +2328,7 @@ public void openEngineAndRecoverFromTranslog() throws IOException { }; // Do not load the global checkpoint if this is a remote snapshot index - if (indexSettings.isRemoteSnapshot() == false) { + if (indexSettings.isRemoteSnapshot() == false && indexSettings.isRemoteTranslogStoreEnabled() == false) { loadGlobalCheckpointToReplicationTracker(); } @@ -2981,7 +3009,7 @@ public ReplicationStats getReplicationStats() { long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L); long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum(); long maxReplicationLag = stats.stream() - .mapToLong(SegmentReplicationShardStats::getCurrentReplicationTimeMillis) + .mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis) .max() .orElse(0L); return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag); @@ -3364,6 +3392,20 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingE synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex } + postActivatePrimaryMode(); + } + + private void postActivatePrimaryMode() { + if (indexSettings.isRemoteStoreEnabled()) { + // We make sure to upload translog (even if it does not contain any operations) to remote translog. + // This helps to get a consistent state in remote store where both remote segment store and remote + // translog contains data. + try { + getEngine().translogManager().syncTranslog(); + } catch (IOException e) { + logger.error("Failed to sync translog to remote from new primary", e); + } + } ensurePeerRecoveryRetentionLeasesExist(); } @@ -3758,9 +3800,9 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro ); } - /** - * With segment replication enabled for primary relocation, recover replica shard initially as read only and - * change to a writeable engine during relocation handoff after a round of segment replication. + /* + With segment replication enabled for primary relocation, recover replica shard initially as read only and + change to a writeable engine during relocation handoff after a round of segment replication. 
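The computeReplicationCheckpoint method introduced above only takes the store metadata snapshot, an IO-heavy call, when the SegmentInfos version or generation has moved past the last checkpoint it handed out; otherwise the previously computed checkpoint is reused. Reduced to a generic reuse-by-version sketch (illustrative types, not the OpenSearch classes):

import java.util.Objects;
import java.util.function.Supplier;

// Recompute an expensive snapshot only when the (version, generation) pair of the
// underlying data has changed; otherwise hand back the cached value.
final class CheckpointCache<T> {
    private long version = -1;
    private long generation = -1;
    private T cached;

    synchronized T getOrCompute(long version, long generation, Supplier<T> expensiveCompute) {
        if (cached != null && this.version == version && this.generation == generation) {
            return cached; // reader did not advance, reuse the last value
        }
        cached = Objects.requireNonNull(expensiveCompute.get());
        this.version = version;
        this.generation = generation;
        return cached;
    }
}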
*/ boolean isReadOnlyReplica = indexSettings.isSegRepEnabled() && (shardRouting.primary() == false @@ -3772,7 +3814,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro indexSettings, warmer, store, - indexSettings.getMergePolicy(), + indexSettings.getMergePolicy(isTimeSeriesIndex), mapperService != null ? mapperService.indexAnalyzer() : null, similarityService.similarity(mapperService), engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService), @@ -4396,15 +4438,19 @@ public final boolean isSearchIdle() { } /** - * * Returns true if this shard supports search idle. - * + *
<p>
                          * Indices using Segment Replication will ignore search idle unless there are no replicas. * Primary shards push out new segments only * after a refresh, so we don't want to wait for a search to trigger that cycle. Replicas will only refresh after receiving * a new set of segments. */ public final boolean isSearchIdleSupported() { + // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh + // task continues to upload to remote store periodically. + if (isRemoteTranslogEnabled()) { + return false; + } return indexSettings.isSegRepEnabled() == false || indexSettings.getNumberOfReplicas() == 0; } @@ -4741,6 +4787,8 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOE * @throws IOException if exception occurs while reading segments from remote store. */ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { + boolean syncSegmentSuccess = false; + long startTimeMs = System.currentTimeMillis(); assert indexSettings.isRemoteStoreEnabled(); logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); @@ -4790,9 +4838,15 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runn : "There should not be any segments file in the dir"; store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } + syncSegmentSuccess = true; } catch (IOException e) { throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); } finally { + logger.trace( + "syncSegmentsFromRemoteSegmentStore success={} elapsedTime={}", + syncSegmentSuccess, + (System.currentTimeMillis() - startTimeMs) + ); store.decRef(); remoteStore.decRef(); } @@ -4895,7 +4949,7 @@ private String copySegmentFiles( if (toDownloadSegments.isEmpty() == false) { try { - downloadSegments(storeDirectory, sourceRemoteDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync); + fileDownloader.download(sourceRemoteDirectory, storeDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync); } catch (Exception e) { throw new IOException("Error occurred when downloading segments from remote store", e); } @@ -4908,33 +4962,8 @@ private String copySegmentFiles( return segmentNFile; } - private void downloadSegments( - Directory storeDirectory, - RemoteSegmentStoreDirectory sourceRemoteDirectory, - RemoteSegmentStoreDirectory targetRemoteDirectory, - Set toDownloadSegments, - final Runnable onFileSync - ) { - final PlainActionFuture completionListener = PlainActionFuture.newFuture(); - final GroupedActionListener batchDownloadListener = new GroupedActionListener<>( - ActionListener.map(completionListener, v -> null), - toDownloadSegments.size() - ); - - final ActionListener segmentsDownloadListener = ActionListener.map(batchDownloadListener, fileName -> { - onFileSync.run(); - if (targetRemoteDirectory != null) { - targetRemoteDirectory.copyFrom(storeDirectory, fileName, fileName, IOContext.DEFAULT); - } - return null; - }); - - final Path indexPath = store.shardPath() == null ? 
null : store.shardPath().resolveIndex(); - toDownloadSegments.forEach(file -> { sourceRemoteDirectory.copyTo(file, storeDirectory, indexPath, segmentsDownloadListener); }); - completionListener.actionGet(); - } - - private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { + // Visible for testing + boolean localDirectoryContains(Directory localDirectory, String file, long checksum) throws IOException { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { return true; @@ -4953,6 +4982,8 @@ private boolean localDirectoryContains(Directory localDirectory, String file, lo logger.debug("File {} does not exist in local FS, downloading from remote store", file); } catch (IOException e) { logger.warn("Exception while reading checksum of file: {}, this can happen if file is corrupted", file); + // For any other exception on reading checksum, we delete the file to re-download again + localDirectory.deleteFile(file); } return false; } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index f1abea81a6511..89cbc59403faf 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -172,7 +172,7 @@ public Stats(StreamInput in) throws IOException { isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { docStatusStats = in.readOptionalWriteable(DocStatusStats::new); } else { docStatusStats = null; @@ -308,7 +308,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { out.writeOptionalWriteable(docStatusStats); } } diff --git a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java index 7dbbcbb2d7d20..803db773efe6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java +++ b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java @@ -54,7 +54,7 @@ /** * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. - * + *
<p>
                          * When {@link Closeable#close()}d it will no longer accept listeners and flush any existing listeners. * * @opensearch.internal @@ -86,7 +86,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener, * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle * are just added back to the new list. Both the reference and the contents are always modified while synchronized on {@code this}. - * + *
<p>
                          * We never set this to non-null while closed it {@code true}. */ private volatile List>> refreshListeners = null; diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 695c01367171a..464adc88ae16f 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -86,7 +86,7 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL private final RemoteSegmentStoreDirectory remoteDirectory; private final RemoteSegmentTransferTracker segmentTracker; private final Map localSegmentChecksumMap; - private long primaryTerm; + private volatile long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; @@ -123,14 +123,12 @@ public void beforeRefresh() throws IOException {} @Override protected void runAfterRefreshExactlyOnce(boolean didRefresh) { - if (shouldSync(didRefresh)) { - segmentTracker.updateLocalRefreshTimeAndSeqNo(); + // We have 2 separate methods to check if sync needs to be done or not. This is required since we use the return boolean + // from isReadyForUpload to schedule refresh retries as the index shard or the primary mode are not in complete + // ready state. + if (shouldSync(didRefresh, true) && isReadyForUpload()) { try { - if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { - logger.debug("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); - this.primaryTerm = indexShard.getOperationPrimaryTerm(); - this.remoteDirectory.init(); - } + segmentTracker.updateLocalRefreshTimeAndSeqNo(); try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { Collection localSegmentsPostRefresh = segmentInfosGatedCloseable.get().files(true); updateLocalSizeMapAndTracker(localSegmentsPostRefresh); @@ -151,7 +149,7 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { boolean successful; - if (shouldSync(didRefresh)) { + if (shouldSync(didRefresh, false)) { successful = syncSegments(); } else { successful = true; @@ -159,21 +157,30 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { return successful; } - private boolean shouldSync(boolean didRefresh) { - // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it - // is important to upload the zero state segments so that the restore does not break. - return this.primaryTerm != indexShard.getOperationPrimaryTerm() - || didRefresh - || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty(); + /** + * This checks if there is a sync required to remote. + * + * @param didRefresh if the readers changed. + * @param skipPrimaryTermCheck consider change in primary term or not for should sync + * @return true if sync is needed + */ + private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) { + boolean shouldSync = didRefresh // If the readers change, didRefresh is always true. + // The third condition exists for uploading the zero state segments where the refresh has not changed the reader + // reference, but it is important to upload the zero state segments so that the restore does not break. 
+ || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty() + // When the shouldSync is called the first time, then 1st condition on primary term is true. But after that + // we update the primary term and the same condition would not evaluate to true again in syncSegments. + // Below check ensures that if there is commit, then that gets picked up by both 1st and 2nd shouldSync call. + || isRefreshAfterCommitSafe(); + if (shouldSync || skipPrimaryTermCheck) { + return shouldSync; + } + return this.primaryTerm != indexShard.getOperationPrimaryTerm(); } private boolean syncSegments() { - if (indexShard.getReplicationTracker().isPrimaryMode() == false || indexShard.state() == IndexShardState.CLOSED) { - logger.debug( - "Skipped syncing segments with primaryMode={} indexShardState={}", - indexShard.getReplicationTracker().isPrimaryMode(), - indexShard.state() - ); + if (isReadyForUpload() == false) { // Following check is required to enable retry and make sure that we do not lose this refresh event // When primary shard is restored from remote store, the recovery happens first followed by changing // primaryMode to true. Due to this, the refresh that is triggered post replay of translog will not go through @@ -181,7 +188,6 @@ private boolean syncSegments() { // in the remote store. return indexShard.state() != IndexShardState.STARTED || !(indexShard.getEngine() instanceof InternalEngine); } - ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); beforeSegmentsSync(); long refreshTimeMs = segmentTracker.getLocalRefreshTimeMs(), refreshClockTimeMs = segmentTracker.getLocalRefreshClockTimeMs(); long refreshSeqNo = segmentTracker.getLocalRefreshSeqNo(); @@ -190,6 +196,7 @@ private boolean syncSegments() { try { try { + initializeRemoteDirectoryOnTermUpdate(); // if a new segments_N file is present in local that is not uploaded to remote store yet, it // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. @@ -199,10 +206,7 @@ private boolean syncSegments() { try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); - assert segmentInfos.getGeneration() == checkpoint.getSegmentsGen() : "SegmentInfos generation: " - + segmentInfos.getGeneration() - + " does not match metadata generation: " - + checkpoint.getSegmentsGen(); + final ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos); // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move. long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); @@ -327,6 +331,19 @@ private boolean isRefreshAfterCommit() throws IOException { && !remoteDirectory.containsFile(lastCommittedLocalSegmentFileName, getChecksumOfLocalFile(lastCommittedLocalSegmentFileName))); } + /** + * Returns if the current refresh has happened after a commit. + * @return true if this refresh has happened on account of a commit. If otherwise or exception, returns false. 
+ */ + private boolean isRefreshAfterCommitSafe() { + try { + return isRefreshAfterCommit(); + } catch (Exception e) { + logger.info("Exception occurred in isRefreshAfterCommitSafe", e); + } + return false; + } + void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos segmentInfos, ReplicationCheckpoint replicationCheckpoint) throws IOException { final long maxSeqNo = ((InternalEngine) indexShard.getEngine()).currentOngoingRefreshCheckpoint(); @@ -346,7 +363,8 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se segmentInfosSnapshot, storeDirectory, translogFileGeneration, - replicationCheckpoint + replicationCheckpoint, + indexShard.getNodeId() ); } } @@ -442,6 +460,48 @@ private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesB } } + /** + * On primary term update, we (re)initialise the remote segment directory to reflect the latest metadata file that + * has been uploaded to remote store successfully. This method also updates the segment tracker about the latest + * uploaded segment files onto remote store. + */ + private void initializeRemoteDirectoryOnTermUpdate() throws IOException { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + logger.trace("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + RemoteSegmentMetadata uploadedMetadata = this.remoteDirectory.init(); + + // During failover, the uploaded metadata would have names of files that have been uploaded to remote store. + // Here we update the tracker with latest remote uploaded files. + if (uploadedMetadata != null) { + segmentTracker.setLatestUploadedFiles(uploadedMetadata.getMetadata().keySet()); + } + } + } + + /** + * This checks for readiness of the index shard and primary mode. This has separated from shouldSync since we use the + * returned value of this method for scheduling retries in syncSegments method. + * @return true iff primaryMode is true and index shard is not in closed state. 
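The comments above explain why the listener now separates "is a sync needed" (shouldSync) from "is the shard ready to upload" (isReadyForUpload, defined just below): the boolean returned from the post-refresh task is what drives retry scheduling while the shard is not yet in primary mode or not started. A simplified illustration of that contract; the type and method names here are stand-ins, not the actual OpenSearch API:

// The boolean returned by the post-refresh task tells the retryable listener whether to re-arm.
interface RetryableRefreshTask {
    boolean needsSync(boolean didRefresh); // e.g. readers changed, nothing uploaded yet, or a commit happened
    boolean isReadyForUpload();            // e.g. primary mode is active and the shard is not closed
    boolean syncSegments();                // the actual upload; false requests another attempt

    default boolean afterRefresh(boolean didRefresh) {
        if (needsSync(didRefresh) == false) {
            return true;  // nothing to upload, no retry needed
        }
        if (isReadyForUpload() == false) {
            return false; // not ready yet; in the real listener this drives a retry with backoff
        }
        return syncSegments();
    }
}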
+ */ + private boolean isReadyForUpload() { + boolean isReady = indexShard.getReplicationTracker().isPrimaryMode() && indexShard.state() != IndexShardState.CLOSED; + if (isReady == false) { + StringBuilder sb = new StringBuilder("Skipped syncing segments with"); + if (indexShard.getReplicationTracker() != null) { + sb.append(" primaryMode=").append(indexShard.getReplicationTracker().isPrimaryMode()); + } + if (indexShard.state() != null) { + sb.append(" indexShardState=").append(indexShard.state()); + } + if (indexShard.getEngineOrNull() != null) { + sb.append(" engineType=").append(indexShard.getEngine().getClass().getSimpleName()); + } + logger.trace(sb.toString()); + } + return isReady; + } + /** * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events */ diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java index ad64f3a55228f..085e93c794fb7 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java @@ -70,23 +70,23 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; SimilarityProvider that = (SimilarityProvider) o; - /** - * We check name only because the similarity is - * re-created for each new instance and they don't implement equals. - * This is not entirely correct though but we only use equality checks - * for similarities inside the same index and names are unique in this case. - **/ + /* + We check name only because the similarity is + re-created for each new instance and they don't implement equals. + This is not entirely correct though but we only use equality checks + for similarities inside the same index and names are unique in this case. + */ return Objects.equals(name, that.name); } @Override public int hashCode() { - /** - * We use name only because the similarity is - * re-created for each new instance and they don't implement equals. - * This is not entirely correct though but we only use equality checks - * for similarities a single index and names are unique in this case. - **/ + /* + We use name only because the similarity is + re-created for each new instance and they don't implement equals. + This is not entirely correct though but we only use equality checks + for similarities a single index and names are unique in this case. 
+ */ return Objects.hash(name); } } diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java index eefc1469a06a0..aa5d90cc65803 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java @@ -322,10 +322,10 @@ public String snapshot() { return snapshot; } - /** - * Returns list of files in the shard - * - * @return list of files + /* + Returns list of files in the shard + + @return list of files */ /** diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 594b7f99cd85a..345583bbbd1be 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -39,7 +39,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -62,9 +61,9 @@ public class RemoteDirectory extends Directory { protected final BlobContainer blobContainer; private static final Logger logger = LogManager.getLogger(RemoteDirectory.class); - protected final UnaryOperator uploadRateLimiter; + private final UnaryOperator uploadRateLimiter; - protected final UnaryOperator downloadRateLimiter; + private final UnaryOperator downloadRateLimiter; /** * Number of bytes in the segment file to store checksum @@ -193,10 +192,14 @@ public IndexOutput createOutput(String name, IOContext context) { */ @Override public IndexInput openInput(String name, IOContext context) throws IOException { + return openInput(name, fileLength(name), context); + } + + public IndexInput openInput(String name, long fileLength, IOContext context) throws IOException { InputStream inputStream = null; try { inputStream = blobContainer.readBlob(name); - return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength(name)); + return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength); } catch (Exception e) { // Incase the RemoteIndexInput creation fails, close the input stream to avoid file handler leak. 
if (inputStream != null) { @@ -230,9 +233,9 @@ public void close() throws IOException { @Override public long fileLength(String name) throws IOException { // ToDo: Instead of calling remote store each time, keep a cache with segment metadata - Map metadata = blobContainer.listBlobsByPrefix(name); - if (metadata.containsKey(name)) { - return metadata.get(name).length(); + List metadata = blobContainer.listBlobsByPrefixInSortedOrder(name, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + if (metadata.size() == 1 && metadata.get(0).name().equals(name)) { + return metadata.get(0).length(); } throw new NoSuchFileException(name); } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index b23d2d7d0a3f8..be1f2341236ab 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -24,7 +24,7 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.Version; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -43,7 +43,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -51,6 +50,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -114,6 +114,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final AtomicLong metadataUploadCounter = new AtomicLong(0); + public static final int METADATA_FILES_TO_FETCH = 10; + public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, @@ -187,9 +189,11 @@ public RemoteSegmentMetadata readLatestMetadataFile() throws IOException { List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ); + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + if (metadataFiles.isEmpty() == false) { String latestMetadataFile = metadataFiles.get(0); logger.trace("Reading latest Metadata file {}", latestMetadataFile); @@ -306,12 +310,13 @@ static String getMetadataFilePrefixForCommit(long primaryTerm, long generation) } // Visible for testing - static String getMetadataFilename( + public static String getMetadataFilename( long primaryTerm, long generation, long translogGeneration, long uploadCounter, - int metadataVersion + int metadataVersion, + String nodeId ) { return String.join( SEPARATOR, @@ -320,6 +325,7 @@ static String getMetadataFilename( RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), + String.valueOf(Objects.hash(nodeId)), RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(metadataVersion) ); @@ -334,6 +340,19 @@ static long getPrimaryTerm(String[] 
filenameTokens) { static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(SEPARATOR); + if (tokens.length < 8) { + // For versions < 2.11, we don't have node id. + return null; + } + String primaryTermAndGen = String.join(SEPARATOR, tokens[1], tokens[2], tokens[3]); + + String nodeId = tokens[5]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + } /** @@ -407,8 +426,9 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti @Override public IndexInput openInput(String name, IOContext context) throws IOException { String remoteFilename = getExistingRemoteFilename(name); + long fileLength = fileLength(name); if (remoteFilename != null) { - return remoteDataDirectory.openInput(remoteFilename, context); + return remoteDataDirectory.openInput(remoteFilename, fileLength, context); } else { throw new NoSuchFileException(name); } @@ -445,41 +465,6 @@ public void copyFrom(Directory from, String src, IOContext context, ActionListen } } - /** - * Copies an existing {@code source} file from this directory to a non-existent file (also - * named {@code source}) in either {@code destinationDirectory} or {@code destinationPath}. - * If the blob container backing this directory supports multipart downloads, the {@code source} - * file will be downloaded (potentially in multiple concurrent parts) directly to - * {@code destinationPath}. This method will return immediately and {@code fileCompletionListener} - * will be notified upon completion. - *
<p>
                          - * If multipart downloads are not supported, then {@code source} file will be copied to a file named - * {@code source} in a single part to {@code destinationDirectory}. The download will happen on the - * calling thread and {@code fileCompletionListener} will be notified synchronously before this - * method returns. - * - * @param source The source file name - * @param destinationDirectory The destination directory (if multipart is not supported) - * @param destinationPath The destination path (if multipart is supported) - * @param fileCompletionListener The listener to notify of completion - */ - public void copyTo(String source, Directory destinationDirectory, Path destinationPath, ActionListener fileCompletionListener) { - final String blobName = getExistingRemoteFilename(source); - if (destinationPath != null && remoteDataDirectory.getBlobContainer() instanceof AsyncMultiStreamBlobContainer) { - final AsyncMultiStreamBlobContainer blobContainer = (AsyncMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer(); - final Path destinationFilePath = destinationPath.resolve(source); - blobContainer.asyncBlobDownload(blobName, destinationFilePath, threadPool, fileCompletionListener); - } else { - // Fallback to older mechanism of downloading the file - try { - destinationDirectory.copyFrom(this, source, source, IOContext.DEFAULT); - fileCompletionListener.onResponse(source); - } catch (IOException e) { - fileCompletionListener.onFailure(e); - } - } - } - /** * This acquires a lock on a given commit by creating a lock file in lock directory using {@code FileLockInfo} * @@ -593,6 +578,7 @@ public boolean containsFile(String localFilename, String checksum) { * @param storeDirectory instance of local directory to temporarily create metadata file before upload * @param translogGeneration translog generation * @param replicationCheckpoint ReplicationCheckpoint of primary shard + * @param nodeId node id * @throws IOException in case of I/O error while uploading the metadata file */ public void uploadMetadata( @@ -600,7 +586,8 @@ public void uploadMetadata( SegmentInfos segmentInfosSnapshot, Directory storeDirectory, long translogGeneration, - ReplicationCheckpoint replicationCheckpoint + ReplicationCheckpoint replicationCheckpoint, + String nodeId ) throws IOException { synchronized (this) { String metadataFilename = MetadataFilenameUtils.getMetadataFilename( @@ -608,7 +595,8 @@ public void uploadMetadata( segmentInfosSnapshot.getGeneration(), translogGeneration, metadataUploadCounter.incrementAndGet(), - RemoteSegmentMetadata.CURRENT_VERSION + RemoteSegmentMetadata.CURRENT_VERSION, + nodeId ); try { try (IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT)) { diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 490b07e441702..a5e89ec6a8327 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -76,8 +76,4 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } - - private RemoteDirectory createRemoteDirectory(BlobStoreRepository repository, BlobPath commonBlobPath, String extension) { - 
return new RemoteDirectory(repository.blobStore().blobContainer(commonBlobPath.add(extension))); - } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java new file mode 100644 index 0000000000000..727c57afd289b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; + +/** + * Helper class to downloads files from a {@link RemoteSegmentStoreDirectory} + * instance to a local {@link Directory} instance in parallel depending on thread + * pool size and recovery settings. + */ +@InternalApi +public final class RemoteStoreFileDownloader { + private final Logger logger; + private final ThreadPool threadPool; + private final RecoverySettings recoverySettings; + + public RemoteStoreFileDownloader(ShardId shardId, ThreadPool threadPool, RecoverySettings recoverySettings) { + this.logger = Loggers.getLogger(RemoteStoreFileDownloader.class, shardId); + this.threadPool = threadPool; + this.recoverySettings = recoverySettings; + } + + /** + * Copies the given segments from the remote segment store to the given + * local directory. + * @param source The remote directory to copy segment files from + * @param destination The local directory to copy segment files to + * @param toDownloadSegments The list of segment files to download + * @param listener Callback listener to be notified upon completion + */ + public void downloadAsync( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + Collection toDownloadSegments, + ActionListener listener + ) { + downloadInternal(cancellableThreads, source, destination, null, toDownloadSegments, () -> {}, listener); + } + + /** + * Copies the given segments from the remote segment store to the given + * local directory, while also copying the segments _to_ another remote directory. + * @param source The remote directory to copy segment files from + * @param destination The local directory to copy segment files to + * @param secondDestination The second remote directory that segment files are + * copied to after being copied to the local directory + * @param toDownloadSegments The list of segment files to download + * @param onFileCompletion A generic runnable that is invoked after each file download. + * Must be thread safe as this may be invoked concurrently from + * different threads. 
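The new RemoteStoreFileDownloader spreads the files to download over a bounded number of workers pulling from a shared concurrent queue (see downloadInternal and copyOneFile below); the worker count is the minimum of the number of files, the remote-recovery thread pool size, and the max-concurrent-remote-store-streams setting, and the first failure clears the queue so no further work is handed out. Reduced to a generic, self-contained sketch using a plain executor instead of the OpenSearch thread pool:

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;

// Drain 'items' with at most 'threads' workers; the first failure clears the queue so no
// new work is handed out, and the exception surfaces from the returned future.
final class ParallelQueueDrainer {
    static <T> CompletableFuture<Void> drain(ExecutorService executor, Queue<T> items, int threads, Consumer<T> action) {
        List<CompletableFuture<Void>> workers = new ArrayList<>();
        for (int i = 0; i < threads; i++) {
            workers.add(CompletableFuture.runAsync(() -> {
                T item;
                while ((item = items.poll()) != null) { // each worker pulls until the queue drains
                    try {
                        action.accept(item);
                    } catch (RuntimeException e) {
                        items.clear();                  // stop handing out further work
                        throw e;
                    }
                }
            }, executor));
        }
        return CompletableFuture.allOf(workers.toArray(new CompletableFuture[0]));
    }
}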
+ */ + public void download( + Directory source, + Directory destination, + Directory secondDestination, + Collection toDownloadSegments, + Runnable onFileCompletion + ) throws InterruptedException, IOException { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture listener = PlainActionFuture.newFuture(); + downloadInternal(cancellableThreads, source, destination, secondDestination, toDownloadSegments, onFileCompletion, listener); + try { + listener.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } + throw new RuntimeException(e); + } catch (InterruptedException e) { + // If the blocking call on the PlainActionFuture itself is interrupted, then we must + // cancel the asynchronous work we were waiting on + cancellableThreads.cancel(e.getMessage()); + Thread.currentThread().interrupt(); + throw e; + } + } + + private void downloadInternal( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + @Nullable Directory secondDestination, + Collection toDownloadSegments, + Runnable onFileCompletion, + ActionListener listener + ) { + final Queue queue = new ConcurrentLinkedQueue<>(toDownloadSegments); + // Choose the minimum of: + // - number of files to download + // - max thread pool size + // - "indices.recovery.max_concurrent_remote_store_streams" setting + final int threads = Math.min( + toDownloadSegments.size(), + Math.min(threadPool.info(ThreadPool.Names.REMOTE_RECOVERY).getMax(), recoverySettings.getMaxConcurrentRemoteStoreStreams()) + ); + logger.trace("Starting download of {} files with {} threads", queue.size(), threads); + final ActionListener allFilesListener = new GroupedActionListener<>(ActionListener.map(listener, r -> null), threads); + for (int i = 0; i < threads; i++) { + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, allFilesListener); + } + } + + private void copyOneFile( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + @Nullable Directory secondDestination, + Queue queue, + Runnable onFileCompletion, + ActionListener listener + ) { + final String file = queue.poll(); + if (file == null) { + // Queue is empty, so notify listener we are done + listener.onResponse(null); + } else { + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY).submit(() -> { + logger.trace("Downloading file {}", file); + try { + cancellableThreads.executeIO(() -> { + destination.copyFrom(source, file, file, IOContext.DEFAULT); + onFileCompletion.run(); + if (secondDestination != null) { + secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); + } + }); + } catch (Exception e) { + // Clear the queue to stop any future processing, report the failure, then return + queue.clear(); + listener.onFailure(e); + return; + } + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, listener); + }); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 285241ba89996..b822742de6e97 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -299,14 +299,15 @@ final void ensureOpen() { /** * Returns a new MetadataSnapshot for the given commit. 
If the given commit is null * the latest commit point is used. - * + * <p>
                          * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. If in doubt, you probably want to use one of the following: - * + *
<p>
                          * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed - * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the + * + * @param commit the index commit to read the snapshot from or {@code null} if the latest snapshot should be read from the * directory * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an * unexpected exception when opening the index reading the segments file. @@ -330,10 +331,10 @@ public MetadataSnapshot getMetadata() throws IOException { /** * Returns a new MetadataSnapshot for the given commit. If the given commit is null * the latest commit point is used. - * + *
<p>
                          * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. If in doubt, you probably want to use one of the following: - * + *
<p>
                          * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed @@ -385,7 +386,13 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio */ public Map getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { assert indexSettings.isSegRepEnabled(); - return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + failIfCorrupted(); + try { + return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + } catch (NoSuchFileException | CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { + markStoreCorrupted(ex); + throw ex; + } } /** @@ -800,7 +807,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr /** * Segment replication method - * + *
<p>
                          * This method takes the segment info bytes to build SegmentInfos. It inc'refs files pointed by passed in SegmentInfos * bytes to ensure they are not deleted. * @@ -875,7 +882,7 @@ public void beforeClose() { * have the ability to create a writer directly from a SegmentInfos object. To promote the replica as a primary and avoid reindexing, we must first commit * on the replica so that it can be opened with a writeable engine. Further, InternalEngine currently invokes `trimUnsafeCommits` which reverts the engine to a previous safeCommit where the max seqNo is less than or equal * to the current global checkpoint. It is likely that the replica has a maxSeqNo that is higher than the global cp and a new commit will be wiped. - * + *
<p>
                          * To get around these limitations, this method first creates an IndexCommit directly from SegmentInfos, it then * uses an appending IW to create an IndexCommit from the commit created on SegmentInfos. * This ensures that 1. All files in the new commit are fsynced and 2. Deletes older commit points so the only commit to start from is our new commit. diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java index 24f42743e1a04..b6be60c489a6c 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java @@ -21,6 +21,7 @@ public class FileLockInfo implements LockInfo { private String fileToLock; private String acquirerId; + private static final int INVALID_INDEX = -1; public String getAcquirerId() { return acquirerId; @@ -88,21 +89,34 @@ static String generateLockName(String fileToLock, String acquirerId) { } public static String getFileToLockNameFromLock(String lockName) { - String[] lockNameTokens = lockName.split(RemoteStoreLockManagerUtils.SEPARATOR); - - if (lockNameTokens.length != 2) { - throw new IllegalArgumentException("Provided Lock Name " + lockName + " is not Valid."); + // use proper separator for the lock file depending on the version it is created + String lockSeparator = lockName.endsWith(RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION) + ? RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR + : RemoteStoreLockManagerUtils.SEPARATOR; + final int indexOfSeparator = lockName.lastIndexOf(lockSeparator); + if (indexOfSeparator == INVALID_INDEX) { + throw new IllegalArgumentException("Provided lock name: " + lockName + " is invalid with separator: " + lockSeparator); } - return lockNameTokens[0]; + return lockName.substring(0, indexOfSeparator); } public static String getAcquirerIdFromLock(String lockName) { - String[] lockNameTokens = lockName.split(RemoteStoreLockManagerUtils.SEPARATOR); + String lockExtension = RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION; + String lockSeparator = RemoteStoreLockManagerUtils.SEPARATOR; - if (lockNameTokens.length != 2) { - throw new IllegalArgumentException("Provided Lock Name " + lockName + " is not Valid."); + // check if lock file is created on version <=2.10 + if (lockName.endsWith(RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION)) { + lockSeparator = RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR; + lockExtension = RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION; + } + final int indexOfSeparator = lockName.lastIndexOf(lockSeparator); + final int indexOfExt = lockName.lastIndexOf(lockExtension); + if (indexOfSeparator == INVALID_INDEX || indexOfExt == INVALID_INDEX) { + throw new IllegalArgumentException( + "Provided lock name: " + lockName + " is invalid with separator: " + lockSeparator + " and extension: " + lockExtension + ); } - return lockNameTokens[1].replace(RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION, ""); + return lockName.substring(indexOfSeparator + lockSeparator.length(), indexOfExt); } } diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java index 452dfc329d88b..d5fb2722a64dc 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java +++ 
b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java @@ -15,8 +15,11 @@ */ public class RemoteStoreLockManagerUtils { static final String FILE_TO_LOCK_NAME = "file_to_lock"; - static final String SEPARATOR = "___"; - static final String LOCK_FILE_EXTENSION = ".lock"; + static final String PRE_OS210_LOCK_SEPARATOR = "___"; + static final String SEPARATOR = "..."; + // for versions <= 2.10, we have lock files with this extension. + static final String PRE_OS210_LOCK_FILE_EXTENSION = ".lock"; + static final String LOCK_FILE_EXTENSION = ".v2_lock"; static final String ACQUIRER_ID = "acquirer_id"; public static final String NO_TTL = "-1"; static final String LOCK_EXPIRY_TIME = "lock_expiry_time"; diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java index 7319a5324777a..6fd198747570f 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java @@ -27,7 +27,7 @@ *
                          * This class delegate the responsibility of actually fetching the block when demanded to its subclasses using * {@link OnDemandBlockIndexInput#fetchBlock(int)}. - * + *
<p>
                          * Like {@link IndexInput}, this class may only be used from one thread as it is not thread safe. * However, a cleaning action may run from another thread triggered by the {@link Cleaner}, but * this is okay because at that point the {@link OnDemandBlockIndexInput} instance is phantom @@ -428,10 +428,10 @@ Builder blockSizeShift(int blockSizeShift) { * instance to hold the current underlying IndexInput, while allowing it to * be changed out with different instances as {@link OnDemandBlockIndexInput} * reads through the data. - * + *
<p>
                          * This class implements {@link Runnable} so that it can be passed directly * to the cleaner to run its close action. - * + *
<p>
                          * [1]: https://github.com/apache/lucene/blob/8340b01c3cc229f33584ce2178b07b8984daa6a9/lucene/core/src/java/org/apache/lucene/store/IndexInput.java#L32-L33 */ private static class BlockHolder implements Closeable, Runnable { diff --git a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java index efc762ef00d52..05049e5d07373 100644 --- a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java +++ b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java @@ -14,7 +14,7 @@ /** * Default implementation for the {@link TranslogDeletionPolicy}. Plugins can override the default behaviour * via the {@link org.opensearch.plugins.EnginePlugin#getCustomTranslogDeletionPolicyFactory()}. - * + *
<p>
                          * The default policy uses total number, size in bytes and maximum age for files. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 4bc9a711894b7..4d0fc13d433c6 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -18,6 +18,7 @@ import org.opensearch.index.engine.LifecycleAware; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import java.io.Closeable; import java.io.IOException; @@ -83,11 +84,14 @@ public InternalTranslogManager( * Rolls the translog generation and cleans unneeded. */ @Override - public void rollTranslogGeneration() throws TranslogException { + public void rollTranslogGeneration() throws TranslogException, IOException { try (ReleasableLock ignored = readLock.acquire()) { engineLifeCycleAware.ensureOpen(); translog.rollGeneration(); translog.trimUnreferencedReaders(); + } catch (TranslogUploadFailedException e) { + // Do not trigger the translogEventListener as it fails the Engine while this is only an issue with remote upload + throw e; } catch (AlreadyClosedException e) { translogEventListener.onFailure("translog roll generation failed", e); throw e; @@ -426,10 +430,10 @@ public String getTranslogUUID() { * @return if the translog should be flushed */ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { - final long translogGenerationOfLastCommit = translog.getMinGenerationForSeqNo( - localCheckpointOfLastCommit + 1 - ).translogFileGeneration; - if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { + // This is the minimum seqNo that is referred in translog and considered for calculating translog size + long minTranslogRefSeqNo = translog.getMinUnreferencedSeqNoInSegments(localCheckpointOfLastCommit + 1); + final long minReferencedTranslogGeneration = translog.getMinGenerationForSeqNo(minTranslogRefSeqNo).translogFileGeneration; + if (translog.sizeInBytesByMinGen(minReferencedTranslogGeneration) < flushThreshold) { return false; } /* @@ -450,7 +454,7 @@ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long fl final long translogGenerationOfNewCommit = translog.getMinGenerationForSeqNo( localCheckpointTrackerSupplier.get().getProcessedCheckpoint() + 1 ).translogFileGeneration; - return translogGenerationOfLastCommit < translogGenerationOfNewCommit + return minReferencedTranslogGeneration < translogGenerationOfNewCommit || localCheckpointTrackerSupplier.get().getProcessedCheckpoint() == localCheckpointTrackerSupplier.get().getMaxSeqNo(); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 004d1bfdca36d..a305a774f5854 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -161,6 +161,7 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t remoteTranslogTransferTracker ); RemoteFsTranslog.download(translogTransferManager, location, logger); + 
logger.trace(remoteTranslogTransferTracker.toString()); } static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { @@ -173,15 +174,20 @@ static void download(TranslogTransferManager translogTransferManager, Path locat */ IOException ex = null; for (int i = 0; i <= DOWNLOAD_RETRIES; i++) { + boolean success = false; + long startTimeMs = System.currentTimeMillis(); try { downloadOnce(translogTransferManager, location, logger); + success = true; return; } catch (FileNotFoundException | NoSuchFileException e) { // continue till download retries ex = e; + } finally { + logger.trace("downloadOnce success={} timeElapsed={}", success, (System.currentTimeMillis() - startTimeMs)); } } - logger.debug("Exhausted all download retries during translog/checkpoint file download"); + logger.info("Exhausted all download retries during translog/checkpoint file download"); throw ex; } @@ -242,15 +248,10 @@ public static TranslogTransferManager buildTranslogTransferManager( @Override public boolean ensureSynced(Location location) throws IOException { - try { - assert location.generation <= current.getGeneration(); - if (location.generation == current.getGeneration()) { - ensureOpen(); - return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); - } - } catch (final Exception ex) { - closeOnTragicEvent(ex); - throw ex; + assert location.generation <= current.getGeneration(); + if (location.generation == current.getGeneration()) { + ensureOpen(); + return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); } return false; } @@ -327,7 +328,8 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { generation, location, readers, - Translog::getCommitCheckpointFileName + Translog::getCommitCheckpointFileName, + config.getNodeId() ).build() ) { return translogTransferManager.transferSnapshot( @@ -354,14 +356,8 @@ private boolean syncToDisk() throws IOException { @Override public void sync() throws IOException { - try { - if (syncToDisk() || syncNeeded()) { - prepareAndUpload(primaryTermSupplier.getAsLong(), null); - } - } catch (final Exception e) { - tragedy.setTragicException(e); - closeOnTragicEvent(e); - throw e; + if (syncToDisk() || syncNeeded()) { + prepareAndUpload(primaryTermSupplier.getAsLong(), null); } } @@ -435,7 +431,7 @@ public void trimUnreferencedReaders() throws IOException { // cleans up remote translog files not referenced in latest uploaded metadata. // This enables us to restore translog from the metadata in case of failover or relocation. 
Set generationsToDelete = new HashSet<>(); - for (long generation = minRemoteGenReferenced - 1; generation >= 0; generation--) { + for (long generation = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); generation >= 0; generation--) { if (fileTransferTracker.uploaded(Translog.getFilename(generation)) == false) { break; } @@ -554,4 +550,9 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) thro } } } + + @Override + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minSeqNoToKeep; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index baa3737d576de..b44aa6e059224 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -840,7 +840,7 @@ public boolean ensureSynced(Stream locations) throws IOException { /** * Closes the translog if the current translog writer experienced a tragic exception. - * + *
<p>
                          * Note that in case this thread closes the translog it must not already be holding a read lock on the translog as it will acquire a * write lock in the course of closing the translog * @@ -1976,7 +1976,7 @@ static String createEmptyTranslog( /** * Creates a new empty translog within the specified {@code location} that contains the given {@code initialGlobalCheckpoint}, * {@code primaryTerm} and {@code translogUUID}. - * + *
<p>
                          * This method should be used directly under specific circumstances like for shards that will see no indexing. Specifying a non-unique * translog UUID could cause a lot of issues and that's why in all (but one) cases the method * {@link #createEmptyTranslog(Path, long, ShardId, long)} should be used instead. @@ -2034,4 +2034,8 @@ public static String createEmptyTranslog( writer.close(); return uuid; } + + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minUnrefCheckpointInLastCommit; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java index cac88bee82a73..6e75ebd847b5e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java @@ -56,6 +56,7 @@ public final class TranslogConfig { private final ShardId shardId; private final Path translogPath; private final ByteSizeValue bufferSize; + private final String nodeId; /** * Creates a new TranslogConfig instance @@ -64,16 +65,24 @@ public final class TranslogConfig { * @param indexSettings the index settings used to set internal variables * @param bigArrays a bigArrays instance used for temporarily allocating write operations */ - public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays) { - this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE); + public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, String nodeId) { + this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE, nodeId); } - TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, ByteSizeValue bufferSize) { + TranslogConfig( + ShardId shardId, + Path translogPath, + IndexSettings indexSettings, + BigArrays bigArrays, + ByteSizeValue bufferSize, + String nodeId + ) { this.bufferSize = bufferSize; this.indexSettings = indexSettings; this.shardId = shardId; this.translogPath = translogPath; this.bigArrays = bigArrays; + this.nodeId = nodeId; } /** @@ -110,4 +119,8 @@ public Path getTranslogPath() { public ByteSizeValue getBufferSize() { return bufferSize; } + + public String getNodeId() { + return nodeId; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index 78aaa1bc13a00..148fd67fb413e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -21,7 +21,7 @@ public interface TranslogManager { /** * Rolls the translog generation and cleans unneeded. */ - void rollTranslogGeneration() throws TranslogException; + void rollTranslogGeneration() throws TranslogException, IOException; /** * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). 
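The shouldPeriodicallyFlush change in InternalTranslogManager above is easier to read outside the hunk. The following is a minimal, self-contained sketch of the new decision rule; the names (TranslogView, minUnreferencedSeqNoInSegments, and so on) are placeholders for illustration rather than the actual OpenSearch APIs, and RemoteFsTranslog's override is modeled by letting minUnreferencedSeqNoInSegments return the minimum sequence number it must keep.

// Illustrative sketch, not OpenSearch code: the flush heuristic after the change above.
final class FlushHeuristicSketch {

    interface TranslogView {
        long minUnreferencedSeqNoInSegments(long candidateSeqNo); // RemoteFsTranslog would return its minSeqNoToKeep here
        long minGenerationForSeqNo(long seqNo);
        long sizeInBytesByMinGen(long generation);
    }

    static boolean shouldPeriodicallyFlush(
        TranslogView translog,
        long localCheckpointOfLastCommit,
        long processedCheckpoint,
        long maxSeqNo,
        long flushThresholdBytes
    ) {
        // Measure translog size from the minimum sequence number still referenced, not from the last commit alone.
        long minRefSeqNo = translog.minUnreferencedSeqNoInSegments(localCheckpointOfLastCommit + 1);
        long minReferencedGen = translog.minGenerationForSeqNo(minRefSeqNo);
        if (translog.sizeInBytesByMinGen(minReferencedGen) < flushThresholdBytes) {
            return false;
        }
        // Flush only if a new commit would advance past the referenced generation,
        // or everything up to the max sequence number has already been processed.
        long genOfNewCommit = translog.minGenerationForSeqNo(processedCheckpoint + 1);
        return minReferencedGen < genOfNewCommit || processedCheckpoint == maxSeqNo;
    }
}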
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java index a5f0607431a8b..3f33c155be15e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java @@ -376,7 +376,7 @@ synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) { /** * write all buffered ops to disk and fsync file. - * + *
<p>
                          * Note: any exception during the sync process will be interpreted as a tragic exception and the writer will be closed before * raising the exception. * @return true if this call caused an actual sync operation diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 0d85123b60c75..25fcdc614172a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -194,7 +194,8 @@ private boolean isTranslogClean(ShardPath shardPath, ClusterState clusterState, shardPath.getShardId(), translogPath, indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardPath.getShardId().id()); // We open translog to check for corruption, do not clean anything. diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index 10dec13c81e1a..fb78731246a07 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -40,11 +40,14 @@ public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Clo private final long primaryTerm; private long minTranslogGeneration; - TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size) { + private String nodeId; + + TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size, String nodeId) { translogCheckpointFileInfoTupleSet = new HashSet<>(size); this.size = size; this.generation = generation; this.primaryTerm = primaryTerm; + this.nodeId = nodeId; } private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) { @@ -63,7 +66,13 @@ public Set getTranslogFileSnapshots() { @Override public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, translogCheckpointFileInfoTupleSet.size() * 2); + return new TranslogTransferMetadata( + primaryTerm, + generation, + minTranslogGeneration, + translogCheckpointFileInfoTupleSet.size() * 2, + nodeId + ); } @Override @@ -110,19 +119,22 @@ public static class Builder { private final List readers; private final Function checkpointGenFileNameMapper; private final Path location; + private final String nodeId; public Builder( long primaryTerm, long generation, Path location, List readers, - Function checkpointGenFileNameMapper + Function checkpointGenFileNameMapper, + String nodeId ) { this.primaryTerm = primaryTerm; this.generation = generation; this.readers = readers; this.checkpointGenFileNameMapper = checkpointGenFileNameMapper; this.location = location; + this.nodeId = nodeId; } public TranslogCheckpointTransferSnapshot build() throws IOException { @@ -134,7 +146,8 @@ public TranslogCheckpointTransferSnapshot build() throws IOException { TranslogCheckpointTransferSnapshot translogTransferSnapshot = new TranslogCheckpointTransferSnapshot( primaryTerm, generation, - readers.size() + readers.size(), + nodeId ); for (TranslogReader reader : readers) { final long readerGeneration = 
reader.getGeneration(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index d7e50cdcf32e4..2f6055df87804 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -24,6 +24,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; @@ -41,7 +42,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -64,6 +64,8 @@ public class TranslogTransferManager { private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000; + private static final int METADATA_FILES_TO_FETCH = 10; + private final Logger logger; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; @@ -153,14 +155,17 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans try { if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) { - Exception ex = new TimeoutException("Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete"); + Exception ex = new TranslogUploadFailedException( + "Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete" + ); exceptionList.forEach(ex::addSuppressed); throw ex; } } catch (InterruptedException ex) { - exceptionList.forEach(ex::addSuppressed); + Exception exception = new TranslogUploadFailedException("Failed to upload " + transferSnapshot, ex); + exceptionList.forEach(exception::addSuppressed); Thread.currentThread().interrupt(); - throw ex; + throw exception; } if (exceptionList.isEmpty()) { TransferFileSnapshot tlogMetadata = prepareMetadata(transferSnapshot); @@ -173,7 +178,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); remoteTranslogTransferTracker.addUploadBytesFailed(metadataBytesToUpload); // outer catch handles capturing stats on upload failure - throw exception; + throw new TranslogUploadFailedException("Failed to upload " + tlogMetadata.getName(), exception); } remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); @@ -182,7 +187,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans translogTransferListener.onUploadComplete(transferSnapshot); return true; } else { - Exception ex = new IOException("Failed to upload " + exceptionList.size() + " files during transfer"); + Exception ex = new TranslogUploadFailedException("Failed to upload " + exceptionList.size() + " files during transfer"); exceptionList.forEach(ex::addSuppressed); throw ex; } @@ -275,6 +280,10 @@ public TranslogTransferMetadata readMetadata() throws IOException { LatchedActionListener> 
latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(blobMetadataList -> { if (blobMetadataList.isEmpty()) return; + RemoteStoreUtils.verifyNoMultipleWriters( + blobMetadataList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); String filename = blobMetadataList.get(0).name(); boolean downloadStatus = false; long downloadStartTime = System.nanoTime(), bytesToRead = 0; @@ -295,6 +304,9 @@ public TranslogTransferMetadata readMetadata() throws IOException { } } }, e -> { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } logger.error(() -> new ParameterizedMessage("Exception while listing metadata files"), e); exceptionSetOnce.set((IOException) e); }), @@ -305,7 +317,7 @@ public TranslogTransferMetadata readMetadata() throws IOException { transferService.listAllInSortedOrder( remoteMetadataTransferPath, TranslogTransferMetadata.METADATA_PREFIX, - 1, + METADATA_FILES_TO_FETCH, latchedActionListener ); latch.await(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java index a8b3404d3f2ce..052206d807fa6 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java @@ -9,6 +9,7 @@ package org.opensearch.index.translog.transfer; import org.opensearch.common.SetOnce; +import org.opensearch.common.collect.Tuple; import org.opensearch.index.remote.RemoteStoreUtils; import java.util.Arrays; @@ -30,7 +31,7 @@ public class TranslogTransferMetadata { private final long minTranslogGeneration; - private int count; + private final int count; private final SetOnce> generationToPrimaryTermMapper = new SetOnce<>(); @@ -46,12 +47,22 @@ public class TranslogTransferMetadata { private final long createdAt; - public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + private final String nodeId; + + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count, String nodeId) { this.primaryTerm = primaryTerm; this.generation = generation; this.minTranslogGeneration = minTranslogGeneration; this.count = count; this.createdAt = System.currentTimeMillis(); + this.nodeId = nodeId; + } + + /* + Used only at the time of download . 
Since details are read from content , nodeId is not available + */ + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + this(primaryTerm, generation, minTranslogGeneration, count, ""); } public long getPrimaryTerm() { @@ -89,11 +100,33 @@ public String getFileName() { RemoteStoreUtils.invertLong(primaryTerm), RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(createdAt), + String.valueOf(Objects.hash(nodeId)), String.valueOf(CURRENT_VERSION) ) ); } + public static Tuple, String> getNodeIdByPrimaryTermAndGeneration(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id + return null; + } + return new Tuple<>(new Tuple<>(RemoteStoreUtils.invertLong(tokens[1]), RemoteStoreUtils.invertLong(tokens[2])), tokens[4]); + } + + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id. + return null; + } + String primaryTermAndGen = String.join(METADATA_SEPARATOR, tokens[1], tokens[2]); + + String nodeId = tokens[4]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + @Override public int hashCode() { return Objects.hash(primaryTerm, generation); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java new file mode 100644 index 0000000000000..4a9b10ec5a52e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import java.io.IOException; + +/** + * Exception is thrown if there are any exceptions while uploading translog to remote store. + * @opensearch.internal + */ +public class TranslogUploadFailedException extends IOException { + + public TranslogUploadFailedException(String message) { + super(message); + } + + public TranslogUploadFailedException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index f5e71327b6e7b..34e0153dbe730 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -144,6 +144,7 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; @@ -298,6 +299,17 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); + /** + * This setting is used to restrict creation of index where the 'index.replication.type' index setting is set. + * If disabled, the replication type can be specified. 
+ */ + public static final Setting CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING = Setting.boolSetting( + "cluster.restrict.index.replication_type", + false, + Property.NodeScope, + Property.Final + ); + /** * The node's settings. */ @@ -335,6 +347,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CountDownLatch closeLatch = new CountDownLatch(1); private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; + private final RecoverySettings recoverySettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; @@ -380,7 +393,8 @@ public IndicesService( Supplier repositoriesServiceSupplier, FileCacheCleaner fileCacheCleaner, SearchRequestStats searchRequestStats, - @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + RecoverySettings recoverySettings ) { this.settings = settings; this.threadPool = threadPool; @@ -477,6 +491,7 @@ protected void closeInternal() { this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); + this.recoverySettings = recoverySettings; } /** @@ -874,7 +889,8 @@ private synchronized IndexService createIndexService( remoteDirectoryFactory, translogFactorySupplier, this::getClusterDefaultRefreshInterval, - this::getClusterRemoteTranslogBufferInterval + this::getClusterRemoteTranslogBufferInterval, + this.recoverySettings ); } @@ -921,7 +937,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { /** * creates a new mapper service for the given index, in order to do administrative work like mapping updates. * This *should not* be used for document parsing. Doing so will result in an exception. - * + *
<p>
                          * Note: the returned {@link MapperService} should be closed when unneeded. */ public synchronized MapperService createIndexMapperService(IndexMetadata indexMetadata) throws IOException { @@ -1133,7 +1149,7 @@ public void deleteUnassignedIndex(String reason, IndexMetadata metadata, Cluster /** * Deletes the index store trying to acquire all shards locks for this index. * This method will delete the metadata for the index even if the actual shards can't be locked. - * + *
<p>
                          * Package private for testing */ void deleteIndexStore(String reason, IndexMetadata metadata) throws IOException { @@ -1214,7 +1230,7 @@ public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexS * This method deletes the shard contents on disk for the given shard ID. This method will fail if the shard deleting * is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)} * of if the shards lock can not be acquired. - * + *
<p>
                          * On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove * the index folder as well. * diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index d9a64781c3f46..e345b613eebbd 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -54,7 +54,7 @@ /** * This class contains the logic used to check the cluster-wide shard limit before shards are created and ensuring that the limit is * updated correctly on setting updates, etc. - * + *
<p>
                          * NOTE: This is the limit applied at *shard creation time*. If you are looking for the limit applied at *allocation* time, which is * controlled by a different setting, * see {@link org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider}. diff --git a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java index 601bd79a24746..13cc78b620b9a 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java @@ -48,11 +48,11 @@ public class PreBuiltCacheFactory { /** * The strategy of caching the analyzer - * - * ONE Exactly one version is stored. Useful for analyzers which do not store version information - * LUCENE Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version - * OPENSEARCH Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch - * releases, when the lucene version does not change + *
<p>
+ * <ul>
+ * <li>ONE : Exactly one version is stored. Useful for analyzers which do not store version information</li>
+ * <li>LUCENE : Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version</li>
+ * <li>OPENSEARCH : Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch releases, when the lucene version does not change</li>
+ * </ul>
                          */ public enum CachingStrategy { ONE, diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index 6c7632a8a408d..cb2bedf00de99 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -376,7 +376,8 @@ private Tuple createRecovery transportService, request.targetNode(), recoverySettings, - throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime) + throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime), + shard.isRemoteTranslogEnabled() ); handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index e2346ae078339..0f3025369833d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; @@ -84,6 +85,17 @@ public class RecoverySettings { Property.NodeScope ); + /** + * Controls the maximum number of streams that can be started concurrently per recovery when downloading from the remote store. + */ + public static final Setting INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = new Setting<>( + "indices.recovery.max_concurrent_remote_store_streams", + (s) -> Integer.toString(Math.max(1, OpenSearchExecutors.allocatedProcessors(s) / 2)), + (s) -> Setting.parseInt(s, 1, "indices.recovery.max_concurrent_remote_store_streams"), + Property.Dynamic, + Property.NodeScope + ); + /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. 
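The new indices.recovery.max_concurrent_remote_store_streams setting above ties into the RemoteStoreFileDownloader added earlier: its default is half the allocated processors (at least one), and the downloader starts at most min(number of files, remote_recovery pool size, setting value) threads. The class below is only a sketch of that arithmetic under those assumptions; Runtime.availableProcessors() stands in for the allocated-processor count the real code reads from node settings and the thread pool.

// Illustrative sketch only; mirrors the arithmetic described above, not the real classes.
final class RemoteStoreStreamConcurrencySketch {

    // Default for "indices.recovery.max_concurrent_remote_store_streams": half the processors, at least 1.
    static int defaultMaxConcurrentRemoteStoreStreams(int allocatedProcessors) {
        return Math.max(1, allocatedProcessors / 2);
    }

    // Same bound RemoteStoreFileDownloader#downloadInternal applies when starting download threads.
    static int downloadThreads(int filesToDownload, int remoteRecoveryPoolMax, int maxConcurrentStreams) {
        return Math.min(filesToDownload, Math.min(remoteRecoveryPoolMax, maxConcurrentStreams));
    }

    public static void main(String[] args) {
        int processors = Runtime.getRuntime().availableProcessors();
        int defaultStreams = defaultMaxConcurrentRemoteStoreStreams(processors);
        System.out.println("default max_concurrent_remote_store_streams = " + defaultStreams);
        System.out.println("threads for 3 files with pool of 8 = " + downloadThreads(3, 8, defaultStreams));
    }
}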
@@ -149,6 +161,7 @@ public class RecoverySettings { private volatile ByteSizeValue maxBytesPerSec; private volatile int maxConcurrentFileChunks; private volatile int maxConcurrentOperations; + private volatile int maxConcurrentRemoteStoreStreams; private volatile SimpleRateLimiter rateLimiter; private volatile TimeValue retryDelayStateSync; private volatile TimeValue retryDelayNetwork; @@ -163,6 +176,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); this.maxConcurrentOperations = INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.get(settings); + this.maxConcurrentRemoteStoreStreams = INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the cluster-manager time to remove a faulty node this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); @@ -184,6 +198,10 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, this::setMaxConcurrentOperations); + clusterSettings.addSettingsUpdateConsumer( + INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + this::setMaxConcurrentRemoteStoreStreams + ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); @@ -279,4 +297,12 @@ public int getMaxConcurrentOperations() { private void setMaxConcurrentOperations(int maxConcurrentOperations) { this.maxConcurrentOperations = maxConcurrentOperations; } + + public int getMaxConcurrentRemoteStoreStreams() { + return this.maxConcurrentRemoteStoreStreams; + } + + private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStreams) { + this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; + } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java index ac28dabf815a5..707e41c8c27e1 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java @@ -55,7 +55,7 @@ public interface RecoveryTargetHandler extends FileChunkWriter { /** * Used with Segment replication only - * + *
<p>
                          * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files * conflict with replicas when target is promoted as primary. */ diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index 36beabc4a9026..37227596fdfe7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -75,6 +75,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private final AtomicLong requestSeqNoGenerator = new AtomicLong(0); private final RetryableTransportClient retryableTransportClient; private final RemoteSegmentFileChunkWriter fileChunkWriter; + private final boolean remoteStoreEnabled; public RemoteRecoveryTargetHandler( long recoveryId, @@ -82,7 +83,8 @@ public RemoteRecoveryTargetHandler( TransportService transportService, DiscoveryNode targetNode, RecoverySettings recoverySettings, - Consumer onSourceThrottle + Consumer onSourceThrottle, + boolean remoteStoreEnabled ) { this.transportService = transportService; // It is safe to pass the retry timeout value here because RemoteRecoveryTargetHandler @@ -111,6 +113,7 @@ public RemoteRecoveryTargetHandler( requestSeqNoGenerator, onSourceThrottle ); + this.remoteStoreEnabled = remoteStoreEnabled; } public DiscoveryNode targetNode() { @@ -129,7 +132,13 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + if (remoteStoreEnabled) { + // If remote store is enabled, during the prepare_translog phase, translog is also downloaded on the + // target host along with incremental segments download. + retryableTransportClient.executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); + } else { + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + } } @Override @@ -189,7 +198,7 @@ public void indexTranslogOperations( /** * Used with Segment replication only - * + *
<p>
                          * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files * conflict with replicas when target is promoted as primary. */ diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index 268894b1c0af3..33967c0203516 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -61,8 +61,8 @@ class OngoingSegmentReplications { this.allocationIdToHandlers = ConcurrentCollections.newConcurrentMap(); } - /** - * Operations on the {@link #copyStateMap} member. + /* + Operations on the {@link #copyStateMap} member. */ /** @@ -85,12 +85,12 @@ synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) thro // build the CopyState object and cache it before returning final CopyState copyState = new CopyState(checkpoint, indexShard); - /** - * Use the checkpoint from the request as the key in the map, rather than - * the checkpoint from the created CopyState. This maximizes cache hits - * if replication targets make a request with an older checkpoint. - * Replication targets are expected to fetch the checkpoint in the response - * CopyState to bring themselves up to date. + /* + Use the checkpoint from the request as the key in the map, rather than + the checkpoint from the created CopyState. This maximizes cache hits + if replication targets make a request with an older checkpoint. + Replication targets are expected to fetch the checkpoint in the response + CopyState to bring themselves up to date. 
*/ addToCopyStateMap(checkpoint, copyState); return copyState; diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 9dcd16c53e6f3..02fc8feefd698 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -22,6 +22,7 @@ import org.opensearch.transport.TransportService; import java.util.List; +import java.util.function.BiConsumer; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES; @@ -80,8 +81,13 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { + // fileProgressTracker is a no-op for node to node recovery + // MultiFileWriter takes care of progress tracking for downloads in this scenario + // TODO: Move state management and tracking into replication methods and use chunking and data + // copy mechanisms only from MultiFileWriter final Writeable.Reader reader = GetSegmentFilesResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index aeb690465905f..b06b3e0497cf7 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -14,24 +14,26 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.Version; -import org.opensearch.action.support.GroupedActionListener; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import java.nio.file.Path; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** @@ -45,6 +47,7 @@ public class RemoteStoreReplicationSource implements SegmentReplicationSource { private final IndexShard indexShard; private final RemoteSegmentStoreDirectory remoteDirectory; + private final CancellableThreads cancellableThreads = new CancellableThreads(); public RemoteStoreReplicationSource(IndexShard indexShard) { this.indexShard = indexShard; @@ -63,7 +66,7 @@ public void 
getCheckpointMetadata( // TODO: Need to figure out a way to pass this information for segment metadata via remote store. try (final GatedCloseable segmentInfosSnapshot = indexShard.getSegmentInfosSnapshot()) { final Version version = segmentInfosSnapshot.get().getCommitLuceneVersion(); - RemoteSegmentMetadata mdFile = remoteDirectory.init(); + final RemoteSegmentMetadata mdFile = getRemoteSegmentMetadata(); // During initial recovery flow, the remote store might not // have metadata as primary hasn't uploaded anything yet. if (mdFile == null && indexShard.state().equals(IndexShardState.STARTED) == false) { @@ -98,6 +101,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { try { @@ -107,52 +111,50 @@ public void getSegmentFiles( } logger.debug("Downloading segment files from remote store {}", filesToFetch); - RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); - List toDownloadSegments = new ArrayList<>(); - Collection directoryFiles = List.of(indexShard.store().directory().listAll()); - if (remoteSegmentMetadata != null) { - try { - indexShard.store().incRef(); - indexShard.remoteStore().incRef(); - final Directory storeDirectory = indexShard.store().directory(); - final ShardPath shardPath = indexShard.shardPath(); - for (StoreFileMetadata fileMetadata : filesToFetch) { - String file = fileMetadata.name(); - assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; - toDownloadSegments.add(fileMetadata); - } - downloadSegments(storeDirectory, remoteDirectory, toDownloadSegments, shardPath, listener); - logger.debug("Downloaded segment files from remote store {}", toDownloadSegments); - } finally { - indexShard.store().decRef(); - indexShard.remoteStore().decRef(); + if (remoteMetadataExists()) { + final Directory storeDirectory = indexShard.store().directory(); + final Collection directoryFiles = List.of(storeDirectory.listAll()); + final List toDownloadSegmentNames = new ArrayList<>(); + for (StoreFileMetadata fileMetadata : filesToFetch) { + String file = fileMetadata.name(); + assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; + toDownloadSegmentNames.add(file); } + indexShard.getFileDownloader() + .downloadAsync( + cancellableThreads, + remoteDirectory, + new ReplicationStatsDirectoryWrapper(storeDirectory, fileProgressTracker), + toDownloadSegmentNames, + ActionListener.map(listener, r -> new GetSegmentFilesResponse(filesToFetch)) + ); + } else { + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } - } catch (Exception e) { + } catch (IOException | RuntimeException e) { listener.onFailure(e); } } - private void downloadSegments( - Directory storeDirectory, - RemoteSegmentStoreDirectory remoteStoreDirectory, - List toDownloadSegments, - ShardPath shardPath, - ActionListener completionListener - ) { - final Path indexPath = shardPath == null ? 
null : shardPath.resolveIndex(); - final GroupedActionListener batchDownloadListener = new GroupedActionListener<>( - ActionListener.map(completionListener, v -> new GetSegmentFilesResponse(toDownloadSegments)), - toDownloadSegments.size() - ); - ActionListener segmentsDownloadListener = ActionListener.map(batchDownloadListener, result -> null); - toDownloadSegments.forEach( - fileMetadata -> remoteStoreDirectory.copyTo(fileMetadata.name(), storeDirectory, indexPath, segmentsDownloadListener) - ); + @Override + public void cancel() { + this.cancellableThreads.cancel("Canceled by target"); } @Override public String getDescription() { return "RemoteStoreReplicationSource"; } + + private boolean remoteMetadataExists() throws IOException { + final AtomicBoolean metadataExists = new AtomicBoolean(false); + cancellableThreads.executeIO(() -> metadataExists.set(remoteDirectory.readLatestMetadataFile() != null)); + return metadataExists.get(); + } + + private RemoteSegmentMetadata getRemoteSegmentMetadata() throws IOException { + AtomicReference mdFile = new AtomicReference<>(); + cancellableThreads.executeIO(() -> mdFile.set(remoteDirectory.init())); + return mdFile.get(); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 6676b5b667e42..24f0cb15ddb25 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -8,13 +8,19 @@ package org.opensearch.indices.replication; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; import java.util.List; +import java.util.function.BiConsumer; /** * Represents the source of a replication event. @@ -39,6 +45,7 @@ public interface SegmentReplicationSource { * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. * @param filesToFetch {@link List} List of files to fetch. * @param indexShard {@link IndexShard} Reference to the IndexShard. + * @param fileProgressTracker {@link BiConsumer} A consumer that updates the replication progress for shard files. * @param listener {@link ActionListener} Listener that completes with the list of files copied. */ void getSegmentFiles( @@ -46,6 +53,7 @@ void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ); @@ -58,4 +66,69 @@ void getSegmentFiles( * Cancel any ongoing requests, should resolve any ongoing listeners with onFailure with a {@link ExecutionCancelledException}. 
*/ default void cancel() {} + + /** + * Directory wrapper that records copy process for replication statistics + * + * @opensearch.internal + */ + final class ReplicationStatsDirectoryWrapper extends FilterDirectory { + private final BiConsumer fileProgressTracker; + + ReplicationStatsDirectoryWrapper(Directory in, BiConsumer fileProgressTracker) { + super(in); + this.fileProgressTracker = fileProgressTracker; + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + // here we wrap the index input form the source directory to report progress of file copy for the recovery stats. + // we increment the num bytes recovered in the readBytes method below, if users pull statistics they can see immediately + // how much has been recovered. + in.copyFrom(new FilterDirectory(from) { + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final IndexInput input = in.openInput(name, context); + return new IndexInput("StatsDirectoryWrapper(" + input.toString() + ")") { + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public long getFilePointer() { + throw new UnsupportedOperationException("only straight copies are supported"); + } + + @Override + public void seek(long pos) throws IOException { + throw new UnsupportedOperationException("seeks are not supported"); + } + + @Override + public long length() { + return input.length(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException("slices are not supported"); + } + + @Override + public byte readByte() throws IOException { + throw new UnsupportedOperationException("use a buffer if you wanna perform well"); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + // we rely on the fact that copyFrom uses a buffer + input.readBytes(b, offset, len); + fileProgressTracker.accept(dest, (long) len); + } + }; + } + }, src, dest, context); + } + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index e2c47b0fb3159..674c09311c645 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -12,8 +12,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ListenableFuture; @@ -22,7 +20,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.indices.recovery.DelayRecoveryException; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.MultiChunkTransfer; import org.opensearch.indices.replication.common.CopyState; @@ -146,12 +143,6 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene ); }; cancellableThreads.checkForCancel(); - final 
IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable(); - ShardRouting targetShardRouting = routingTable.getByAllocationId(request.getTargetAllocationId()); - if (targetShardRouting == null) { - logger.debug("delaying replication of {} as it is not listed as assigned to target node {}", shard.shardId(), targetNode); - throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); - } final StepListener sendFileStep = new StepListener<>(); Set storeFiles = new HashSet<>(Arrays.asList(shard.store().directory().listAll())); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 5ae480b7d63a4..cc71ef816e525 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -18,7 +18,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.OpenSearchCorruptionException; -import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; import org.opensearch.common.lucene.Lucene; @@ -171,7 +170,14 @@ public void startReplication(ActionListener listener) { final List filesToFetch = getFiles(checkpointInfo); state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, indexShard, getFilesListener); + source.getSegmentFiles( + getId(), + checkpointInfo.getCheckpoint(), + filesToFetch, + indexShard, + this::updateFileRecoveryBytes, + getFilesListener + ); }, listener::onFailure); getFilesListener.whenComplete(response -> { @@ -226,6 +232,7 @@ private List getFiles(CheckpointInfoResponse checkpointInfo) return missingFiles; } + // pkg private for tests private boolean validateLocalChecksum(StoreFileMetadata file) { try (IndexInput indexInput = indexShard.store().directory().openInput(file.name(), IOContext.DEFAULT)) { String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput)); @@ -237,8 +244,30 @@ private boolean validateLocalChecksum(StoreFileMetadata file) { return false; } } catch (IOException e) { - throw new UncheckedIOException("Error reading " + file, e); + logger.warn("Error reading " + file, e); + // Delete file on exceptions so that it can be re-downloaded. This is safe to do as this file is local only + // and not referenced by reader. + try { + indexShard.store().directory().deleteFile(file.name()); + } catch (IOException ex) { + throw new UncheckedIOException("Error reading " + file, e); + } + return false; + } + } + + /** + * Updates the state to reflect recovery progress for the given file and + * updates the last access time for the target. 
+ * @param fileName Name of the file being downloaded + * @param bytesRecovered Number of bytes recovered + */ + private void updateFileRecoveryBytes(String fileName, long bytesRecovered) { + ReplicationLuceneIndex index = state.getIndex(); + if (index != null) { + index.addRecoveredBytesToFile(fileName, bytesRecovered); } + setLastAccessTime(); } private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) throws OpenSearchCorruptionException { @@ -261,9 +290,7 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. // this means we transferred files from the remote that have not be checksummed and they are - // broken. We have to clean up this shard entirely, remove all files and bubble it up to the - // source shard since this index might be broken there as well? The Source can handle this and checks - // its content on disk if possible. + // broken. We have to clean up this shard entirely, remove all files and bubble it up. try { try { store.removeCorruptionMarker(); @@ -279,14 +306,14 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) // In this case the shard is closed at some point while updating the reader. // This can happen when the engine is closed in a separate thread. logger.warn("Shard is already closed, closing replication"); - } catch (OpenSearchException ex) { + } catch (CancellableThreads.ExecutionCancelledException ex) { /* Ignore closed replication target as it can happen due to index shard closed event in a separate thread. In such scenario, ignore the exception */ - assert cancellableThreads.isCancelled() : "Replication target closed but segment replication not cancelled"; + assert cancellableThreads.isCancelled() : "Replication target cancelled but cancellable threads not cancelled"; } catch (Exception ex) { - throw new OpenSearchCorruptionException(ex); + throw new ReplicationFailedException(ex); } finally { if (store != null) { store.decRef(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index ffc4ab86661db..46095adfe96b4 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchCorruptionException; import org.opensearch.action.support.ChannelActionListener; @@ -28,6 +29,7 @@ import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.recovery.ForceSyncRequest; @@ -46,6 +48,7 @@ import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; @@ 
-522,7 +525,7 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { logger.debug("Replication failed {}", target.description()); - if (e instanceof OpenSearchCorruptionException) { + if (isStoreCorrupt(target) || e instanceof CorruptIndexException || e instanceof OpenSearchCorruptionException) { onGoingReplications.fail(replicationId, new ReplicationFailedException("Store corruption during replication", e), true); return; } @@ -531,6 +534,27 @@ public void onFailure(Exception e) { }); } + private boolean isStoreCorrupt(SegmentReplicationTarget target) { + // ensure target is not already closed. In that case + // we can assume the store is not corrupt and that the replication + // event completed successfully. + if (target.refCount() > 0) { + final Store store = target.store(); + if (store.tryIncRef()) { + try { + return store.isMarkedCorrupted(); + } catch (IOException ex) { + logger.warn("Unable to determine if store is corrupt", ex); + return false; + } finally { + store.decRef(); + } + } + } + // store already closed. + return false; + } + private class FileChunkTransportRequestHandler implements TransportRequestHandler { // How many bytes we've copied since we last called RateLimiter.pause diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java index 39649727e31d7..3775be7b6da15 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java @@ -58,8 +58,7 @@ public class ReplicationRequestTracker { * This method will mark that a request with a unique sequence number has been received. If this is the * first time the unique request has been received, this method will return a listener to be completed. * The caller should then perform the requested action and complete the returned listener. - * - * + *
<p>
                          * If the unique request has already been received, this method will either complete the provided listener * or attach that listener to the listener returned in the first call. In this case, the method will * return null and the caller should not perform the requested action as a prior caller is already diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index ec6b4d06b32c3..aac59df4f6573 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -91,6 +91,9 @@ public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIn // make sure the store is not released until we are done. this.cancellableThreads = new CancellableThreads(); store.incRef(); + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().incRef(); + } } public long getId() { @@ -278,6 +281,12 @@ public abstract void writeFileChunk( ); protected void closeInternal() { - store.decRef(); + try { + store.decRef(); + } finally { + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().decRef(); + } + } } } diff --git a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java index f97388a8262ff..5185b740d90cb 100644 --- a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java @@ -74,7 +74,7 @@ private ConfigurationUtils() {} /** * Returns and removes the specified optional property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type string a {@link OpenSearchParseException} is thrown. */ public static String readOptionalStringProperty( @@ -89,7 +89,7 @@ public static String readOptionalStringProperty( /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type string an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -104,7 +104,7 @@ public static String readStringProperty( /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type string a {@link OpenSearchParseException} is thrown. * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown */ @@ -141,7 +141,7 @@ private static String readString(String processorType, String processorTag, Stri /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown. * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown */ @@ -180,7 +180,7 @@ private static String readStringOrInt(String processorType, String processorTag, /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown. */ public static String readOptionalStringOrIntProperty( @@ -228,7 +228,7 @@ private static Boolean readBoolean(String processorType, String processorTag, St /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type int a {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -257,7 +257,7 @@ public static Integer readIntProperty( /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type int a {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -285,7 +285,7 @@ public static Double readDoubleProperty( /** * Returns and removes the specified property of type list from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type list an {@link OpenSearchParseException} is thrown. */ public static List readOptionalList( @@ -303,7 +303,7 @@ public static List readOptionalList( /** * Returns and removes the specified property of type list from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type list an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -333,7 +333,7 @@ private static List readList(String processorType, String processorTag, S /** * Returns and removes the specified property of type map from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type map an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -353,7 +353,7 @@ public static Map readMap( /** * Returns and removes the specified property of type map from the specified configuration map. - * + *
<p>
                          * If the property value isn't of type map an {@link OpenSearchParseException} is thrown. */ public static Map readOptionalMap( diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index 91003e963e302..baf357a4bc0d5 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -695,7 +695,7 @@ public IngestStats stats() { /** * Adds a listener that gets invoked with the current cluster state before processor factories * get invoked. - * + *
<p>
                          * This is useful for components that are used by ingest processors, so that they have the opportunity to update * before these components get used by the ingest processor factory. */ diff --git a/server/src/main/java/org/opensearch/ingest/Pipeline.java b/server/src/main/java/org/opensearch/ingest/Pipeline.java index 37643e48ed2be..2541cfbf4af77 100644 --- a/server/src/main/java/org/opensearch/ingest/Pipeline.java +++ b/server/src/main/java/org/opensearch/ingest/Pipeline.java @@ -123,8 +123,8 @@ public static Pipeline create( /** * Modifies the data of a document to be indexed based on the processor this pipeline holds - * - * If null is returned then this document will be dropped and not indexed, otherwise + *
<p>
                          + * If {@code null} is returned then this document will be dropped and not indexed, otherwise * this document will be kept and indexed. */ public void execute(IngestDocument ingestDocument, BiConsumer handler) { diff --git a/server/src/main/java/org/opensearch/ingest/Processor.java b/server/src/main/java/org/opensearch/ingest/Processor.java index e0f196dfcb115..6097045a87e21 100644 --- a/server/src/main/java/org/opensearch/ingest/Processor.java +++ b/server/src/main/java/org/opensearch/ingest/Processor.java @@ -48,7 +48,7 @@ /** * A processor implementation may modify the data belonging to a document. * Whether changes are made and what exactly is modified is up to the implementation. - * + *
<p>
                          * Processors may get called concurrently and thus need to be thread-safe. * * @opensearch.internal @@ -57,7 +57,7 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. - * + *
<p>
                          * Expert method: only override this method if a processor implementation needs to make an asynchronous call, * otherwise just overwrite {@link #execute(IngestDocument)}. */ diff --git a/server/src/main/java/org/opensearch/ingest/ValueSource.java b/server/src/main/java/org/opensearch/ingest/ValueSource.java index 0ef7c3373596d..3463fb0f83b26 100644 --- a/server/src/main/java/org/opensearch/ingest/ValueSource.java +++ b/server/src/main/java/org/opensearch/ingest/ValueSource.java @@ -56,7 +56,7 @@ public interface ValueSource { /** * Returns a copy of the value this ValueSource holds and resolves templates if there're any. - * + *
<p>
                          * For immutable values only a copy of the reference to the value is made. * * @param model The model to be used when resolving any templates diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index 114702ff0d351..8446ab0dd6166 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -235,6 +235,14 @@ public static class DeviceStats implements Writeable, ToXContentFragment { final long previousWritesCompleted; final long currentSectorsWritten; final long previousSectorsWritten; + final long currentReadTime; + final long previousReadTime; + final long currentWriteTime; + final long previousWriteTime; + final long currentQueueSize; + final long previousQueueSize; + final long currentIOTime; + final long previousIOTime; public DeviceStats( final int majorDeviceNumber, @@ -244,6 +252,10 @@ public DeviceStats( final long currentSectorsRead, final long currentWritesCompleted, final long currentSectorsWritten, + final long currentReadTime, + final long currentWriteTime, + final long currrentQueueSize, + final long currentIOTime, final DeviceStats previousDeviceStats ) { this( @@ -257,7 +269,15 @@ public DeviceStats( currentSectorsRead, previousDeviceStats != null ? previousDeviceStats.currentSectorsRead : -1, currentWritesCompleted, - previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1 + previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1, + currentReadTime, + previousDeviceStats != null ? previousDeviceStats.currentReadTime : -1, + currentWriteTime, + previousDeviceStats != null ? previousDeviceStats.currentWriteTime : -1, + currrentQueueSize, + previousDeviceStats != null ? previousDeviceStats.currentQueueSize : -1, + currentIOTime, + previousDeviceStats != null ? 
previousDeviceStats.currentIOTime : -1 ); } @@ -272,7 +292,15 @@ private DeviceStats( final long currentSectorsRead, final long previousSectorsRead, final long currentWritesCompleted, - final long previousWritesCompleted + final long previousWritesCompleted, + final long currentReadTime, + final long previousReadTime, + final long currentWriteTime, + final long previousWriteTime, + final long currentQueueSize, + final long previousQueueSize, + final long currentIOTime, + final long previousIOTime ) { this.majorDeviceNumber = majorDeviceNumber; this.minorDeviceNumber = minorDeviceNumber; @@ -285,6 +313,14 @@ private DeviceStats( this.previousSectorsRead = previousSectorsRead; this.currentSectorsWritten = currentSectorsWritten; this.previousSectorsWritten = previousSectorsWritten; + this.currentReadTime = currentReadTime; + this.previousReadTime = previousReadTime; + this.currentWriteTime = currentWriteTime; + this.previousWriteTime = previousWriteTime; + this.currentQueueSize = currentQueueSize; + this.previousQueueSize = previousQueueSize; + this.currentIOTime = currentIOTime; + this.previousIOTime = previousIOTime; } public DeviceStats(StreamInput in) throws IOException { @@ -299,6 +335,25 @@ public DeviceStats(StreamInput in) throws IOException { previousSectorsRead = in.readLong(); currentSectorsWritten = in.readLong(); previousSectorsWritten = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + currentReadTime = in.readLong(); + previousReadTime = in.readLong(); + currentWriteTime = in.readLong(); + previousWriteTime = in.readLong(); + currentQueueSize = in.readLong(); + previousQueueSize = in.readLong(); + currentIOTime = in.readLong(); + previousIOTime = in.readLong(); + } else { + currentReadTime = 0; + previousReadTime = 0; + currentWriteTime = 0; + previousWriteTime = 0; + currentQueueSize = 0; + previousQueueSize = 0; + currentIOTime = 0; + previousIOTime = 0; + } } @Override @@ -314,6 +369,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(previousSectorsRead); out.writeLong(currentSectorsWritten); out.writeLong(previousSectorsWritten); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeLong(currentReadTime); + out.writeLong(previousReadTime); + out.writeLong(currentWriteTime); + out.writeLong(previousWriteTime); + out.writeLong(currentQueueSize); + out.writeLong(previousQueueSize); + out.writeLong(currentIOTime); + out.writeLong(previousIOTime); + } } public long operations() { @@ -346,6 +411,39 @@ public long writeKilobytes() { return (currentSectorsWritten - previousSectorsWritten) / 2; } + /** + * Total time taken for all read operations + */ + public long readTime() { + if (previousReadTime == -1) return -1; + return currentReadTime - previousReadTime; + } + + /** + * Total time taken for all write operations + */ + public long writeTime() { + if (previousWriteTime == -1) return -1; + return currentWriteTime - previousWriteTime; + } + + /** + * Queue size based on weighted time spent doing I/Os + */ + public long queueSize() { + if (previousQueueSize == -1) return -1; + return currentQueueSize - previousQueueSize; + } + + /** + * Total time spent doing I/Os + */ + public long ioTimeInMillis() { + if (previousIOTime == -1) return -1; + + return (currentIOTime - previousIOTime); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("device_name", deviceName); @@ -354,9 +452,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params 
params) throws builder.field(IoStats.WRITE_OPERATIONS, writeOperations()); builder.field(IoStats.READ_KILOBYTES, readKilobytes()); builder.field(IoStats.WRITE_KILOBYTES, writeKilobytes()); + builder.field(IoStats.READ_TIME, readTime()); + builder.field(IoStats.WRITE_TIME, writeTime()); + builder.field(IoStats.QUEUE_SIZE, queueSize()); + builder.field(IoStats.IO_TIME_MS, ioTimeInMillis()); return builder; } - } /** @@ -371,6 +472,10 @@ public static class IoStats implements Writeable, ToXContentFragment { private static final String WRITE_OPERATIONS = "write_operations"; private static final String READ_KILOBYTES = "read_kilobytes"; private static final String WRITE_KILOBYTES = "write_kilobytes"; + private static final String READ_TIME = "read_time"; + private static final String WRITE_TIME = "write_time"; + private static final String QUEUE_SIZE = "queue_size"; + private static final String IO_TIME_MS = "io_time_in_millis"; final DeviceStats[] devicesStats; final long totalOperations; @@ -378,6 +483,10 @@ public static class IoStats implements Writeable, ToXContentFragment { final long totalWriteOperations; final long totalReadKilobytes; final long totalWriteKilobytes; + final long totalReadTime; + final long totalWriteTime; + final long totalQueueSize; + final long totalIOTimeInMillis; public IoStats(final DeviceStats[] devicesStats) { this.devicesStats = devicesStats; @@ -386,18 +495,30 @@ public IoStats(final DeviceStats[] devicesStats) { long totalWriteOperations = 0; long totalReadKilobytes = 0; long totalWriteKilobytes = 0; + long totalReadTime = 0; + long totalWriteTime = 0; + long totalQueueSize = 0; + long totalIOTimeInMillis = 0; for (DeviceStats deviceStats : devicesStats) { totalOperations += deviceStats.operations() != -1 ? deviceStats.operations() : 0; totalReadOperations += deviceStats.readOperations() != -1 ? deviceStats.readOperations() : 0; totalWriteOperations += deviceStats.writeOperations() != -1 ? deviceStats.writeOperations() : 0; totalReadKilobytes += deviceStats.readKilobytes() != -1 ? deviceStats.readKilobytes() : 0; totalWriteKilobytes += deviceStats.writeKilobytes() != -1 ? deviceStats.writeKilobytes() : 0; + totalReadTime += deviceStats.readTime() != -1 ? deviceStats.readTime() : 0; + totalWriteTime += deviceStats.writeTime() != -1 ? deviceStats.writeTime() : 0; + totalQueueSize += deviceStats.queueSize() != -1 ? deviceStats.queueSize() : 0; + totalIOTimeInMillis += deviceStats.ioTimeInMillis() != -1 ? 
deviceStats.ioTimeInMillis() : 0; } this.totalOperations = totalOperations; this.totalReadOperations = totalReadOperations; this.totalWriteOperations = totalWriteOperations; this.totalReadKilobytes = totalReadKilobytes; this.totalWriteKilobytes = totalWriteKilobytes; + this.totalReadTime = totalReadTime; + this.totalWriteTime = totalWriteTime; + this.totalQueueSize = totalQueueSize; + this.totalIOTimeInMillis = totalIOTimeInMillis; } public IoStats(StreamInput in) throws IOException { @@ -412,6 +533,17 @@ public IoStats(StreamInput in) throws IOException { this.totalWriteOperations = in.readLong(); this.totalReadKilobytes = in.readLong(); this.totalWriteKilobytes = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.totalReadTime = in.readLong(); + this.totalWriteTime = in.readLong(); + this.totalQueueSize = in.readLong(); + this.totalIOTimeInMillis = in.readLong(); + } else { + this.totalReadTime = 0; + this.totalWriteTime = 0; + this.totalQueueSize = 0; + this.totalIOTimeInMillis = 0; + } } @Override @@ -425,6 +557,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalWriteOperations); out.writeLong(totalReadKilobytes); out.writeLong(totalWriteKilobytes); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeLong(totalReadTime); + out.writeLong(totalWriteTime); + out.writeLong(totalQueueSize); + out.writeLong(totalIOTimeInMillis); + } } public DeviceStats[] getDevicesStats() { @@ -451,6 +589,34 @@ public long getTotalWriteKilobytes() { return totalWriteKilobytes; } + /** + * Sum of read time across all devices + */ + public long getTotalReadTime() { + return totalReadTime; + } + + /** + * Sum of write time across all devices + */ + public long getTotalWriteTime() { + return totalWriteTime; + } + + /** + * Sum of queue size across all devices + */ + public long getTotalQueueSize() { + return totalQueueSize; + } + + /** + * Sum of IO time across all devices + */ + public long getTotalIOTimeMillis() { + return totalIOTimeInMillis; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (devicesStats.length > 0) { @@ -468,11 +634,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(WRITE_OPERATIONS, totalWriteOperations); builder.field(READ_KILOBYTES, totalReadKilobytes); builder.field(WRITE_KILOBYTES, totalWriteKilobytes); + + builder.field(READ_TIME, totalReadTime); + builder.field(WRITE_TIME, totalWriteTime); + builder.field(QUEUE_SIZE, totalQueueSize); + builder.field(IO_TIME_MS, totalIOTimeInMillis); builder.endObject(); } return builder; } - } private final long timestamp; diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index e20d84cd9763e..f4731a4a34373 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -109,6 +109,25 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, List devicesStats = new ArrayList<>(); + /** + * The /proc/diskstats file displays the I/O statistics of block devices. 
+ * Each line contains the following 14 fields: ( + additional fields ) + * + * 1 major number + * 2 minor number + * 3 device name + * 4 reads completed successfully + * 5 reads merged + * 6 sectors read + * 7 time spent reading (ms) + * 8 writes completed + * 9 writes merged + * 10 sectors written + * 11 time spent writing (ms) + * 12 I/Os currently in progress + * 13 time spent doing I/Os (ms) ---- IO use percent + * 14 weighted time spent doing I/Os (ms) ---- Queue size + */ List lines = readProcDiskStats(); if (!lines.isEmpty()) { for (String line : lines) { @@ -123,6 +142,12 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, final long sectorsRead = Long.parseLong(fields[5]); final long writesCompleted = Long.parseLong(fields[7]); final long sectorsWritten = Long.parseLong(fields[9]); + // readTime and writeTime calculates the total read/write time taken for each request to complete + // ioTime calculates actual time queue and disks are busy + final long readTime = Long.parseLong(fields[6]); + final long writeTime = Long.parseLong(fields[10]); + final long ioTime = fields.length > 12 ? Long.parseLong(fields[12]) : 0; + final long queueSize = fields.length > 13 ? Long.parseLong(fields[13]) : 0; final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -131,6 +156,10 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)) ); devicesStats.add(deviceStats); diff --git a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java index 98229941252ba..a0a14372aa31a 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java @@ -59,13 +59,13 @@ /** * The {@link OsProbe} class retrieves information about the physical and swap size of the machine * memory, as well as the system load average and cpu load. - * + *
<p>
                          * In some exceptional cases, it's possible the underlying native methods used by * {@link #getFreePhysicalMemorySize()}, {@link #getTotalPhysicalMemorySize()}, * {@link #getFreeSwapSpaceSize()}, and {@link #getTotalSwapSpaceSize()} can return a * negative value. Because of this, we prevent those methods from returning negative values, * returning 0 instead. - * + *
<p>
                          * The OS can report a negative number in a number of cases: * - Non-supported OSes (HP-UX, or AIX) * - A failure of macOS to initialize host statistics @@ -183,11 +183,11 @@ public long getTotalSwapSpaceSize() { /** * The system load averages as an array. - * + *
<p>
                          * On Windows, this method returns {@code null}. - * + *
<p>
                          * On Linux, this method returns the 1, 5, and 15-minute load averages. - * + *
<p>
                          * On macOS, this method should return the 1-minute load average. * * @return the available system load averages or {@code null} diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 729b38ca27394..e80b768074fc7 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -136,6 +136,7 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.recovery.RemoteStoreRestoreService; @@ -167,6 +168,7 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.PersistentTasksExecutor; import org.opensearch.persistent.PersistentTasksExecutorRegistry; @@ -195,6 +197,8 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.transport.AdmissionControlTransportInterceptor; import org.opensearch.repositories.RepositoriesModule; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; @@ -224,6 +228,9 @@ import org.opensearch.tasks.consumer.TopNSearchTasksLogger; import org.opensearch.telemetry.TelemetryModule; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.MetricsRegistryFactory; +import org.opensearch.telemetry.metrics.NoopMetricsRegistryFactory; import org.opensearch.telemetry.tracing.NoopTracerFactory; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.telemetry.tracing.TracerFactory; @@ -392,6 +399,8 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; private final Tracer tracer; + + private final MetricsRegistry metricsRegistry; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference runnableTaskListener; private FileCache fileCache; @@ -591,17 +600,35 @@ protected Node( } TracerFactory tracerFactory; + MetricsRegistryFactory metricsRegistryFactory; if (FeatureFlags.isEnabled(TELEMETRY)) { final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); - List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); - TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); - tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + if (telemetrySettings.isTracingFeatureEnabled() || telemetrySettings.isMetricsFeatureEnabled()) { + List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + if (telemetrySettings.isTracingFeatureEnabled()) { + tracerFactory = new 
TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + if (telemetrySettings.isMetricsFeatureEnabled()) { + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, telemetryModule.getTelemetry()); + } else { + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + } else { + tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } } else { tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); } tracer = tracerFactory.getTracer(); + metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); resourcesToClose.add(tracer::close); + resourcesToClose.add(metricsRegistry::close); final IngestService ingestService = new IngestService( clusterService, threadPool, @@ -757,6 +784,8 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); + final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool @@ -790,7 +819,8 @@ protected Node( repositoriesServiceReference::get, fileCacheCleaner, searchRequestStats, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + recoverySettings ); final AliasValidator aliasValidator = new AliasValidator(); @@ -864,6 +894,17 @@ protected Node( final RestController restController = actionModule.getRestController(); + final AdmissionControlService admissionControlService = new AdmissionControlService( + settings, + clusterService.getClusterSettings(), + threadPool + ); + + AdmissionControlTransportInterceptor admissionControlTransportInterceptor = new AdmissionControlTransportInterceptor( + admissionControlService + ); + + List transportInterceptors = List.of(admissionControlTransportInterceptor); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class), @@ -876,8 +917,10 @@ protected Node( networkService, restController, clusterService.getClusterSettings(), - tracer + tracer, + transportInterceptors ); + Collection>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( Plugin.class ).stream().map(Plugin::getIndexTemplateMetadataUpgrader).collect(Collectors.toList()); @@ -951,7 +994,7 @@ protected Node( transportService.getTaskManager() ); - final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), @@ -1058,6 +1101,16 @@ protected Node( transportService.getTaskManager(), taskCancellationMonitoringSettings ); + final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + threadPool, + settings, + clusterService.getClusterSettings() + ); + final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( + nodeResourceUsageTracker, + clusterService, + threadPool + ); this.nodeService = new NodeService( settings, threadPool, @@ -1079,7 +1132,10 @@ protected Node( searchBackpressureService, searchPipelineService, fileCache, - 
taskCancellationMonitoringService + taskCancellationMonitoringService, + resourceUsageCollectorService, + segmentReplicationStatsTracker, + repositoryService ); final SearchService searchService = newSearchService( @@ -1141,6 +1197,7 @@ protected Node( b.bind(IndexingPressureService.class).toInstance(indexingPressureService); b.bind(TaskResourceTrackingService.class).toInstance(taskResourceTrackingService); b.bind(SearchBackpressureService.class).toInstance(searchBackpressureService); + b.bind(AdmissionControlService.class).toInstance(admissionControlService); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); @@ -1200,12 +1257,16 @@ protected Node( b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); + b.bind(NodeResourceUsageTracker.class).toInstance(nodeResourceUsageTracker); + b.bind(ResourceUsageCollectorService.class).toInstance(resourceUsageCollectorService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); b.bind(SearchRequestStats.class).toInstance(searchRequestStats); + b.bind(MetricsRegistry.class).toInstance(metricsRegistry); b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); + b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); }); injector = modules.createInjector(); @@ -1315,6 +1376,8 @@ public Node start() throws NodeValidationException { injector.getInstance(RepositoriesService.class).start(); injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); + injector.getInstance(NodeResourceUsageTracker.class).start(); + injector.getInstance(ResourceUsageCollectorService.class).start(); nodeService.getMonitorService().start(); nodeService.getSearchBackpressureService().start(); nodeService.getTaskCancellationMonitoringService().start(); @@ -1477,6 +1540,8 @@ private Node stop() { injector.getInstance(ClusterService.class).stop(); injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); + injector.getInstance(NodeResourceUsageTracker.class).stop(); + injector.getInstance(ResourceUsageCollectorService.class).stop(); nodeService.getMonitorService().stop(); nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); @@ -1540,6 +1605,10 @@ public synchronized void close() throws IOException { toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_tracker")); + toClose.add(injector.getInstance(NodeResourceUsageTracker.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_collector")); + toClose.add(injector.getInstance(ResourceUsageCollectorService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); toClose.add(injector.getInstance(GatewayService.class)); toClose.add(() -> stopWatch.stop().start("search")); @@ -1571,6 +1640,7 @@ public synchronized void close() throws IOException { 
toClose.add(stopWatch::stop); if (FeatureFlags.isEnabled(TELEMETRY)) { toClose.add(injector.getInstance(Tracer.class)); + toClose.add(injector.getInstance(MetricsRegistry.class)); } if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java new file mode 100644 index 0000000000000..6ef66d4ac1914 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * This represents the resource usage stats of a node along with the timestamp at which the stats object was created + * in the respective node + */ +public class NodeResourceUsageStats implements Writeable { + final String nodeId; + long timestamp; + double cpuUtilizationPercent; + double memoryUtilizationPercent; + + public NodeResourceUsageStats(String nodeId, long timestamp, double memoryUtilizationPercent, double cpuUtilizationPercent) { + this.nodeId = nodeId; + this.timestamp = timestamp; + this.cpuUtilizationPercent = cpuUtilizationPercent; + this.memoryUtilizationPercent = memoryUtilizationPercent; + } + + public NodeResourceUsageStats(StreamInput in) throws IOException { + this.nodeId = in.readString(); + this.timestamp = in.readLong(); + this.cpuUtilizationPercent = in.readDouble(); + this.memoryUtilizationPercent = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.nodeId); + out.writeLong(this.timestamp); + out.writeDouble(this.cpuUtilizationPercent); + out.writeDouble(this.memoryUtilizationPercent); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("NodeResourceUsageStats["); + sb.append(nodeId).append("]("); + sb.append("Timestamp: ").append(timestamp); + sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", cpuUtilizationPercent)); + sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", memoryUtilizationPercent)); + sb.append(")"); + return sb.toString(); + } + + NodeResourceUsageStats(NodeResourceUsageStats nodeResourceUsageStats) { + this( + nodeResourceUsageStats.nodeId, + nodeResourceUsageStats.timestamp, + nodeResourceUsageStats.memoryUtilizationPercent, + nodeResourceUsageStats.cpuUtilizationPercent + ); + } + + public double getMemoryUtilizationPercent() { + return memoryUtilizationPercent; + } + + public double getCpuUtilizationPercent() { + return cpuUtilizationPercent; + } + + public long getTimestamp() { + return timestamp; + } +} diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 2688b894cb9a7..49dde0b81cac7 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -48,11 +48,13 @@ import org.opensearch.discovery.Discovery; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.IndexingPressureService; +import 
org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndicesService; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.plugins.PluginsService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.search.backpressure.SearchBackpressureService; @@ -83,6 +85,7 @@ public class NodeService implements Closeable { private final ScriptService scriptService; private final HttpServerTransport httpServerTransport; private final ResponseCollectorService responseCollectorService; + private final ResourceUsageCollectorService resourceUsageCollectorService; private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; @@ -92,6 +95,9 @@ public class NodeService implements Closeable { private final Discovery discovery; private final FileCache fileCache; private final TaskCancellationMonitoringService taskCancellationMonitoringService; + private final RepositoriesService repositoriesService; + + private final SegmentReplicationStatsTracker segmentReplicationStatsTracker; NodeService( Settings settings, @@ -114,7 +120,10 @@ public class NodeService implements Closeable { SearchBackpressureService searchBackpressureService, SearchPipelineService searchPipelineService, FileCache fileCache, - TaskCancellationMonitoringService taskCancellationMonitoringService + TaskCancellationMonitoringService taskCancellationMonitoringService, + ResourceUsageCollectorService resourceUsageCollectorService, + SegmentReplicationStatsTracker segmentReplicationStatsTracker, + RepositoriesService repositoriesService ) { this.settings = settings; this.threadPool = threadPool; @@ -137,8 +146,11 @@ public class NodeService implements Closeable { this.clusterService = clusterService; this.fileCache = fileCache; this.taskCancellationMonitoringService = taskCancellationMonitoringService; + this.resourceUsageCollectorService = resourceUsageCollectorService; + this.repositoriesService = repositoriesService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); + this.segmentReplicationStatsTracker = segmentReplicationStatsTracker; } public NodeInfo info( @@ -217,7 +229,10 @@ public NodeStats stats( boolean weightedRoutingStats, boolean fileCacheStats, boolean taskCancellation, - boolean searchPipelineStats + boolean searchPipelineStats, + boolean resourceUsageStats, + boolean segmentReplicationTrackerStats, + boolean repositoriesStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -237,6 +252,7 @@ public NodeStats stats( discoveryStats ? discovery.stats() : null, ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, + resourceUsageStats ? resourceUsageCollectorService.stats() : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressureService.nodeStats() : null, shardIndexingPressure ? 
this.indexingPressureService.shardStats(indices) : null, @@ -245,7 +261,9 @@ public NodeStats stats( weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, taskCancellation ? this.taskCancellationMonitoringService.stats() : null, - searchPipelineStats ? this.searchPipelineService.stats() : null + searchPipelineStats ? this.searchPipelineService.stats() : null, + segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, + repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null ); } diff --git a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java new file mode 100644 index 0000000000000..3dff9a27f71a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +/** + * This class represents resource usage stats such as CPU, Memory and IO resource usage of each node along with the + * timestamp of the stats recorded. + */ +public class NodesResourceUsageStats implements Writeable, ToXContentFragment { + + // Map of node id to resource usage stats of the corresponding node. + private final Map nodeIdToResourceUsageStatsMap; + + public NodesResourceUsageStats(Map nodeIdToResourceUsageStatsMap) { + this.nodeIdToResourceUsageStatsMap = nodeIdToResourceUsageStatsMap; + } + + public NodesResourceUsageStats(StreamInput in) throws IOException { + this.nodeIdToResourceUsageStatsMap = in.readMap(StreamInput::readString, NodeResourceUsageStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.nodeIdToResourceUsageStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + } + + /** + * Returns map of node id to resource usage stats of the corresponding node. 
+ */ + public Map getNodeIdToResourceUsageStatsMap() { + return nodeIdToResourceUsageStatsMap; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("resource_usage_stats"); + for (String nodeId : nodeIdToResourceUsageStatsMap.keySet()) { + builder.startObject(nodeId); + NodeResourceUsageStats resourceUsageStats = nodeIdToResourceUsageStatsMap.get(nodeId); + if (resourceUsageStats != null) { + builder.field("timestamp", resourceUsageStats.timestamp); + builder.field("cpu_utilization_percent", String.format(Locale.ROOT, "%.1f", resourceUsageStats.cpuUtilizationPercent)); + builder.field( + "memory_utilization_percent", + String.format(Locale.ROOT, "%.1f", resourceUsageStats.memoryUtilizationPercent) + ); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java new file mode 100644 index 0000000000000..f1c763e09f147 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentMap; + +/** + * This collects node level resource usage statistics such as cpu, memory, IO of each node and makes it available for + * coordinator node to aid in throttling, ranking etc + */ +public class ResourceUsageCollectorService extends AbstractLifecycleComponent implements ClusterStateListener { + + /** + * This refresh interval denotes the polling interval of ResourceUsageCollectorService to refresh the resource usage + * stats from local node + */ + private static long REFRESH_INTERVAL_IN_MILLIS = 1000; + + private static final Logger logger = LogManager.getLogger(ResourceUsageCollectorService.class); + private final ConcurrentMap nodeIdToResourceUsageStats = ConcurrentCollections.newConcurrentMap(); + + private ThreadPool threadPool; + private volatile Scheduler.Cancellable scheduledFuture; + + private NodeResourceUsageTracker nodeResourceUsageTracker; + private ClusterService clusterService; + + public ResourceUsageCollectorService( + NodeResourceUsageTracker nodeResourceUsageTracker, + ClusterService clusterService, + ThreadPool threadPool + ) { + this.threadPool = threadPool; + this.nodeResourceUsageTracker = nodeResourceUsageTracker; + this.clusterService = clusterService; + clusterService.addListener(this); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if 
(event.nodesRemoved()) { + for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) { + removeNodeResourceUsageStats(removedNode.getId()); + } + } + } + + void removeNodeResourceUsageStats(String nodeId) { + nodeIdToResourceUsageStats.remove(nodeId); + } + + /** + * Collect node resource usage stats along with the timestamp + */ + public void collectNodeResourceUsageStats( + String nodeId, + long timestamp, + double memoryUtilizationPercent, + double cpuUtilizationPercent + ) { + nodeIdToResourceUsageStats.compute(nodeId, (id, resourceUsageStats) -> { + if (resourceUsageStats == null) { + return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent); + } else { + resourceUsageStats.cpuUtilizationPercent = cpuUtilizationPercent; + resourceUsageStats.memoryUtilizationPercent = memoryUtilizationPercent; + resourceUsageStats.timestamp = timestamp; + return resourceUsageStats; + } + }); + } + + /** + * Get all node resource usage statistics which will be used for node stats + */ + public Map getAllNodeStatistics() { + Map nodeStats = new HashMap<>(nodeIdToResourceUsageStats.size()); + nodeIdToResourceUsageStats.forEach((nodeId, resourceUsageStats) -> { + nodeStats.put(nodeId, new NodeResourceUsageStats(resourceUsageStats)); + }); + return nodeStats; + } + + /** + * Optionally return a {@code NodeResourceUsageStats} for the given nodeid, if + * resource usage stats information exists for the given node. Returns an empty + * {@code Optional} if the node was not found. + */ + public Optional getNodeStatistics(final String nodeId) { + return Optional.ofNullable(nodeIdToResourceUsageStats.get(nodeId)) + .map(resourceUsageStats -> new NodeResourceUsageStats(resourceUsageStats)); + } + + /** + * Returns collected resource usage statistics of all nodes + */ + public NodesResourceUsageStats stats() { + return new NodesResourceUsageStats(getAllNodeStatistics()); + } + + /** + * Fetch local node resource usage statistics and add it to store along with the current timestamp + */ + private void collectLocalNodeResourceUsageStats() { + if (nodeResourceUsageTracker.isReady() && clusterService.state() != null) { + collectNodeResourceUsageStats( + clusterService.state().nodes().getLocalNodeId(), + System.currentTimeMillis(), + nodeResourceUsageTracker.getMemoryUtilizationPercent(), + nodeResourceUsageTracker.getCpuUtilizationPercent() + ); + } + } + + @Override + protected void doStart() { + /** + * Fetch local node resource usage statistics every second + */ + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + try { + collectLocalNodeResourceUsageStats(); + } catch (Exception e) { + logger.warn("failure in ResourceUsageCollectorService", e); + } + }, new TimeValue(REFRESH_INTERVAL_IN_MILLIS), ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index 26c078353d12a..ca2413a057a6b 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -17,6 +17,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import 
org.opensearch.repositories.RepositoryException; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -133,10 +134,19 @@ public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode boolean repositoryAlreadyPresent = false; for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { - if (newRepositoryMetadata.equalsIgnoreGenerations(existingRepositoryMetadata)) { + try { + // This will help in handling two scenarios - + // 1. When a fresh cluster is formed and a node tries to join the cluster, the repository + // metadata constructed from the node attributes of the joining node will be validated + // against the repository information provided by existing nodes in cluster state. + // 2. It's possible to update repository settings except the restricted ones post the + // creation of a system repository and if a node drops we will need to allow it to join + // even if the non-restricted system repository settings are now different. + repositoriesService.get().ensureValidSystemRepositoryUpdate(newRepositoryMetadata, existingRepositoryMetadata); + newRepositoryMetadata = existingRepositoryMetadata; repositoryAlreadyPresent = true; break; - } else { + } catch (RepositoryException e) { throw new IllegalStateException( "new repository metadata [" + newRepositoryMetadata diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java new file mode 100644 index 0000000000000..f83a1b7f9fc05 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * Base class for sliding window resource usage trackers + */ +public abstract class AbstractAverageUsageTracker extends AbstractLifecycleComponent { + private static final Logger LOGGER = LogManager.getLogger(AbstractAverageUsageTracker.class); + + private final ThreadPool threadPool; + private final TimeValue pollingInterval; + private TimeValue windowDuration; + private final AtomicReference observations = new AtomicReference<>(); + + private volatile Scheduler.Cancellable scheduledFuture; + + public AbstractAverageUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + this.threadPool = threadPool; + this.pollingInterval = pollingInterval; + this.windowDuration = windowDuration; + this.setWindowSize(windowDuration); + } + + public abstract long getUsage(); + + /** + * Returns the moving average of the datapoints + */ + public double getAverage() { + return observations.get().getAverage(); + } + + /** + * Checks if we have datapoints more than or equal to the window size + */ + public boolean isReady() { + return observations.get().isReady(); + } + + /** + * Creates a new instance of MovingAverage with a new window size based on WindowDuration + */ + public void setWindowSize(TimeValue windowDuration) { + this.windowDuration = windowDuration; + int windowSize = (int) (windowDuration.nanos() / pollingInterval.nanos()); + LOGGER.debug("updated window size: {}", windowSize); + observations.set(new MovingAverage(windowSize)); + } + + public TimeValue getPollingInterval() { + return pollingInterval; + } + + public TimeValue getWindowDuration() { + return windowDuration; + } + + public long getWindowSize() { + return observations.get().getCount(); + } + + public void recordUsage(long usage) { + observations.get().record(usage); + } + + @Override + protected void doStart() { + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + long usage = getUsage(); + recordUsage(usage); + }, pollingInterval, ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java new file mode 100644 index 0000000000000..160d385762eb0 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
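AbstractAverageUsageTracker sizes its MovingAverage window as windowDuration divided by pollingInterval. A standalone sketch of the same arithmetic using java.time, with the 500 ms / 30 s figures taken from the defaults defined further down in ResourceTrackerSettings (class name hypothetical):

import java.time.Duration;

public class WindowSizeSketch {
    public static void main(String[] args) {
        Duration windowDuration = Duration.ofSeconds(30); // default window duration
        Duration pollingInterval = Duration.ofMillis(500); // default polling interval
        // Same arithmetic as setWindowSize: window / poll = number of samples kept in the moving average.
        int windowSize = (int) (windowDuration.toNanos() / pollingInterval.toNanos());
        System.out.println("samples in moving average window = " + windowSize); // 60
    }
}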
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.threadpool.ThreadPool; + +/** + * AverageCpuUsageTracker tracks the average CPU usage by polling the CPU usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageCpuUsageTracker extends AbstractAverageUsageTracker { + private static final Logger LOGGER = LogManager.getLogger(AverageCpuUsageTracker.class); + + public AverageCpuUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Returns the process CPU usage in percent + */ + @Override + public long getUsage() { + long usage = ProcessProbe.getInstance().getProcessCpuPercent(); + LOGGER.debug("Recording cpu usage: {}%", usage); + return usage; + } + +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java new file mode 100644 index 0000000000000..c1d1c83656859 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; + +/** + * AverageMemoryUsageTracker tracks the average JVM usage by polling the JVM usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageMemoryUsageTracker extends AbstractAverageUsageTracker { + + private static final Logger LOGGER = LogManager.getLogger(AverageMemoryUsageTracker.class); + + private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); + + public AverageMemoryUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Get current memory usage percentage calculated against max heap memory + */ + @Override + public long getUsage() { + long usage = MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed() * 100 / MEMORY_MX_BEAN.getHeapMemoryUsage().getMax(); + LOGGER.debug("Recording memory usage: {}%", usage); + return usage; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java new file mode 100644 index 0000000000000..cf5f38c1b004c --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
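The memory tracker above derives a heap utilization percentage from the standard MemoryMXBean. A standalone sketch of the same calculation; the guard for getMax() returning -1 is an extra precaution added here for illustration, not something the tracker does:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class HeapUsageSketch {
    public static void main(String[] args) {
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        long max = heap.getMax();
        // getMax() may be -1 if the maximum heap size is undefined; treat that as unknown rather than dividing by it.
        long usagePercent = max > 0 ? heap.getUsed() * 100 / max : -1;
        System.out.println("heap utilization percent = " + usagePercent);
    }
}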
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +/** + * This tracks the usage of node resources such as CPU, IO and memory + */ +public class NodeResourceUsageTracker extends AbstractLifecycleComponent { + private ThreadPool threadPool; + private final ClusterSettings clusterSettings; + private AverageCpuUsageTracker cpuUsageTracker; + private AverageMemoryUsageTracker memoryUsageTracker; + + private ResourceTrackerSettings resourceTrackerSettings; + + public NodeResourceUsageTracker(ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) { + this.threadPool = threadPool; + this.clusterSettings = clusterSettings; + this.resourceTrackerSettings = new ResourceTrackerSettings(settings); + initialize(); + } + + /** + * Return CPU utilization average if we have enough datapoints, otherwise return 0 + */ + public double getCpuUtilizationPercent() { + if (cpuUsageTracker.isReady()) { + return cpuUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Return memory utilization average if we have enough datapoints, otherwise return 0 + */ + public double getMemoryUtilizationPercent() { + if (memoryUsageTracker.isReady()) { + return memoryUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Checks if all of the resource usage trackers are ready + */ + public boolean isReady() { + return memoryUsageTracker.isReady() && cpuUsageTracker.isReady(); + } + + void initialize() { + cpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + resourceTrackerSettings.getCpuPollingInterval(), + resourceTrackerSettings.getCpuWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + this::setCpuWindowDuration + ); + + memoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + resourceTrackerSettings.getMemoryPollingInterval(), + resourceTrackerSettings.getMemoryWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + this::setMemoryWindowDuration + ); + } + + private void setMemoryWindowDuration(TimeValue windowDuration) { + memoryUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setMemoryWindowDuration(windowDuration); + } + + private void setCpuWindowDuration(TimeValue windowDuration) { + cpuUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setCpuWindowDuration(windowDuration); + } + + /** + * Visible for testing + */ + ResourceTrackerSettings getResourceTrackerSettings() { + return resourceTrackerSettings; + } + + @Override + protected void doStart() { + cpuUsageTracker.doStart(); + memoryUsageTracker.doStart(); + } + + @Override + protected void doStop() { + cpuUsageTracker.doStop(); + memoryUsageTracker.doStop(); + } + + @Override + protected void doClose() { + cpuUsageTracker.doClose(); + memoryUsageTracker.doClose(); + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java new file mode 100644 index 0000000000000..f81b008ba7e8b --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java @@ -0,0 +1,90 @@ +/* + 
* SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +/** + * Settings related to resource usage trackers such as polling interval, window duration etc + */ +public class ResourceTrackerSettings { + + private static class Defaults { + /** + * This is the default polling interval of usage trackers to get the resource utilization data + */ + private static final long POLLING_INTERVAL_IN_MILLIS = 500; + /** + * This is the default window duration on which the average resource utilization values will be calculated + */ + private static final long WINDOW_DURATION_IN_SECONDS = 30; + } + + public static final Setting GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_cpu_usage.polling_interval", + TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + public static final Setting GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_cpu_usage.window_duration", + TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_jvmmp.polling_interval", + TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_jvmmp.window_duration", + TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private volatile TimeValue cpuWindowDuration; + private volatile TimeValue cpuPollingInterval; + private volatile TimeValue memoryWindowDuration; + private volatile TimeValue memoryPollingInterval; + + public ResourceTrackerSettings(Settings settings) { + this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + } + + public TimeValue getCpuWindowDuration() { + return this.cpuWindowDuration; + } + + public TimeValue getCpuPollingInterval() { + return cpuPollingInterval; + } + + public TimeValue getMemoryPollingInterval() { + return memoryPollingInterval; + } + + public TimeValue getMemoryWindowDuration() { + return memoryWindowDuration; + } + + public void setCpuWindowDuration(TimeValue cpuWindowDuration) { + this.cpuWindowDuration = cpuWindowDuration; + } + + public void setMemoryWindowDuration(TimeValue memoryWindowDuration) { + this.memoryWindowDuration = memoryWindowDuration; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java new file mode 100644 index 0000000000000..aace2a019973e --- /dev/null +++ 
b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Node level resource usage stats tracker package + */ +package org.opensearch.node.resource.tracker; diff --git a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java index 209df1e1f498d..403630b89e42a 100644 --- a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java +++ b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java @@ -37,7 +37,7 @@ /** * This component is responsible for execution of persistent tasks. - * + *
<p>
                          * It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index 89bb23930b063..4e38fb34dbf17 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -323,7 +323,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS /** * This unassigns a task from any node, i.e. it is assigned to a {@code null} node with the provided reason. - * + *
<p>
                          * Since the assignment executor node is null, the {@link PersistentTasksClusterService} will attempt to reassign it to a valid * node quickly. * diff --git a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java index 7e7345811246f..3552c8286b7a3 100644 --- a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java @@ -46,9 +46,9 @@ public interface CircuitBreakerPlugin { /** * Each of the factory functions are passed to the configured {@link CircuitBreakerService}. - * + *
<p>
                          * The service then constructs a {@link CircuitBreaker} given the resulting {@link BreakerSettings}. - * + *
<p>
                          * Custom circuit breakers settings can be found in {@link BreakerSettings}. * See: * - limit (example: `breaker.foo.limit`) {@link BreakerSettings#CIRCUIT_BREAKER_LIMIT_SETTING} @@ -63,7 +63,7 @@ public interface CircuitBreakerPlugin { /** * The passed {@link CircuitBreaker} object is the same one that was constructed by the {@link BreakerSettings} * provided by {@link CircuitBreakerPlugin#getCircuitBreaker(Settings)}. - * + *
<p>
                          * This reference should never change throughout the lifetime of the node. * * @param circuitBreaker The constructed {@link CircuitBreaker} object from the {@link BreakerSettings} diff --git a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java index c2e147b86d17f..1edd9f52d97a7 100644 --- a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java @@ -64,7 +64,7 @@ default Collection createAllocationDeciders(Settings settings /** * Return {@link ShardsAllocator} implementations added by this plugin. - * + *
<p>
                          * The key of the returned {@link Map} is the name of the allocator, and the value * is a function to construct the allocator. * @@ -88,7 +88,7 @@ default Map getExistingShardsAllocators() { /** * Called when the node is started * - * DEPRECATED: Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations. + * @deprecated Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations. */ @Deprecated default void onNodeStarted() {} diff --git a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java index bca72942bd70e..63f0d826b592f 100644 --- a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java @@ -68,13 +68,10 @@ public interface DiscoveryPlugin { * This can be handy if you want to provide your own Network interface name like _mycard_ * and implement by yourself the logic to get an actual IP address/hostname based on this * name. - * + *
<p>
                          * For example: you could call a third party service (an API) to resolve _mycard_. * Then you could define in opensearch.yml settings like: - * - *
<pre>{@code
                          -     * network.host: _mycard_
-     * }</pre>
                          + * {@code network.host: _mycard_ } */ default NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { return null; @@ -82,7 +79,7 @@ default NetworkService.CustomNameResolver getCustomNameResolver(Settings setting /** * Returns providers of seed hosts for discovery. - * + *
<p>
                          * The key of the returned map is the name of the host provider * (see {@link org.opensearch.discovery.DiscoveryModule#DISCOVERY_SEED_PROVIDERS_SETTING}), and * the value is a supplier to construct the host provider when it is selected for use. diff --git a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java index 92ae2ff9cd661..5ea5442d84ffa 100644 --- a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java @@ -92,7 +92,7 @@ default Optional getCustomCodecServiceFactory(IndexSettings * When an index is created this method is invoked for each engine plugin. Engine plugins that need to provide a * custom {@link TranslogDeletionPolicy} can override this method to return a function that takes the {@link IndexSettings} * and a {@link Supplier} for {@link RetentionLeases} and returns a custom {@link TranslogDeletionPolicy}. - * + *
<p>
                          * Only one of the installed Engine plugins can override this otherwise {@link IllegalStateException} will be thrown. * * @return a function that returns an instance of {@link TranslogDeletionPolicy} diff --git a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java index 4dd4010383934..367d335ac4fea 100644 --- a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java @@ -36,7 +36,7 @@ /** * An extension point for {@link Plugin} implementations to be themselves extensible. - * + *
<p>
                          * This class provides a callback for extensible plugins to be informed of other plugins * which extend them. * @@ -62,7 +62,7 @@ interface ExtensionLoader { /** * Allow this plugin to load extensions from other plugins. - * + *
<p>
                          * This method is called once only, after initializing this plugin and all plugins extending this plugin. It is called before * any other methods on this Plugin instance are called. */ diff --git a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java index 00f3f8aff585c..410535504f0dd 100644 --- a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java @@ -19,16 +19,14 @@ public interface IdentityPlugin { /** - * Get the current subject - * - * Should never return null + * Get the current subject. + * @return Should never return null * */ public Subject getSubject(); /** * Get the Identity Plugin's token manager implementation - * - * Should never return null + * @return Should never return null. */ public TokenManager getTokenManager(); } diff --git a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java index dfc11aa8eea55..dc4f22de71344 100644 --- a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java @@ -46,7 +46,7 @@ public interface IngestPlugin { /** * Returns additional ingest processor types added by this plugin. - * + *
<p>
                          * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.ingest.Processor.Factory} * to create the processor from a given pipeline configuration. diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index f2f8e84f04e02..07df40bafe6a1 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -83,7 +83,8 @@ default Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.emptyMap(); } diff --git a/server/src/main/java/org/opensearch/plugins/Plugin.java b/server/src/main/java/org/opensearch/plugins/Plugin.java index 0743cd3807eff..48486a6b55dfd 100644 --- a/server/src/main/java/org/opensearch/plugins/Plugin.java +++ b/server/src/main/java/org/opensearch/plugins/Plugin.java @@ -121,7 +121,7 @@ public Collection> getGuiceServiceClasses() /** * Returns components added by this plugin. - * + *
<p>
                          * Any components returned that implement {@link LifecycleComponent} will have their lifecycle managed. * Note: To aid in the migration away from guice, all objects returned as components will be bound in guice * to themselves. diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 47195a0264750..cc9cc5b5b5fbf 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -468,7 +468,7 @@ private static Bundle readPluginBundle(final Set bundles, final Path plu /** * Return the given bundles, sorted in dependency loading order. - * + *
<p>
                          * This sort is stable, so that if two plugins do not have any interdependency, * their relative order from iteration of the provided set will not change. * diff --git a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java index 189ba3cfc16ab..09233d49f3aea 100644 --- a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java @@ -52,9 +52,8 @@ public interface RepositoryPlugin { * Returns repository types added by this plugin. * * @param env The environment for the local node, which may be used for the local settings and path.repo - * - * The key of the returned {@link Map} is the type name of the repository and - * the value is a factory to construct the {@link Repository} interface. + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. */ default Map getRepositories( Environment env, @@ -70,9 +69,8 @@ default Map getRepositories( * through the external API. * * @param env The environment for the local node, which may be used for the local settings and path.repo - * - * The key of the returned {@link Map} is the type name of the repository and - * the value is a factory to construct the {@link Repository} interface. + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. */ default Map getInternalRepositories( Environment env, diff --git a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java index d2ef2b65c5944..7288a8caaec58 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java @@ -35,7 +35,7 @@ public interface SearchPipelinePlugin { /** * Returns additional search pipeline request processor types added by this plugin. - * + *
<p>
                          * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. @@ -46,7 +46,7 @@ default Map> getRequestProcess /** * Returns additional search pipeline response processor types added by this plugin. - * + *
<p>
                          * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. @@ -57,7 +57,7 @@ default Map> getResponseProce /** * Returns additional search pipeline search phase results processor types added by this plugin. - * + *
<p>
                          * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index 8b5920e1f4485..40b4f97cd1897 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -551,8 +551,8 @@ class CompositeAggregationSpec { private final Consumer aggregatorRegistrar; private final Class valueSourceBuilderClass; @Deprecated - /** This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of - * byte code + /* This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of + byte code */ private Byte byteCode; private final CompositeAggregationParsingFunction parsingFunction; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java new file mode 100644 index 0000000000000..2cc409b0e4465 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CPUBasedAdmissionController; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER; + +/** + * Admission control Service that bootstraps and manages all the Admission Controllers in OpenSearch. 
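AdmissionControlService keeps a map of controller name to controller and fans every transport action out to each registered controller. A rough standalone sketch of that registry-and-dispatch shape (ControllerSketch and the action string are hypothetical stand-ins, not the OpenSearch types):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class AdmissionControlRegistrySketch {
    interface ControllerSketch {
        void apply(String action);
    }

    private final Map<String, ControllerSketch> controllers = new ConcurrentHashMap<>();

    void register(String name, ControllerSketch controller) {
        controllers.put(name, controller);
    }

    // Every registered controller gets a chance to admit or reject the action.
    void applyTransportAdmissionControl(String action) {
        controllers.forEach((name, controller) -> controller.apply(action));
    }

    public static void main(String[] args) {
        AdmissionControlRegistrySketch registry = new AdmissionControlRegistrySketch();
        registry.register("global_cpu_usage", action -> System.out.println("checking CPU before " + action));
        registry.applyTransportAdmissionControl("indices:data/write/bulk");
    }
}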
+ */ +public class AdmissionControlService { + private final ThreadPool threadPool; + public final AdmissionControlSettings admissionControlSettings; + private final ConcurrentMap ADMISSION_CONTROLLERS; + private static final Logger logger = LogManager.getLogger(AdmissionControlService.class); + private final ClusterSettings clusterSettings; + private final Settings settings; + + /** + * + * @param settings Immutable settings instance + * @param clusterSettings ClusterSettings Instance + * @param threadPool ThreadPool Instance + */ + public AdmissionControlService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this.threadPool = threadPool; + this.admissionControlSettings = new AdmissionControlSettings(clusterSettings, settings); + this.ADMISSION_CONTROLLERS = new ConcurrentHashMap<>(); + this.clusterSettings = clusterSettings; + this.settings = settings; + this.initialise(); + } + + /** + * Initialise and Register all the admissionControllers + */ + private void initialise() { + // Initialise different type of admission controllers + registerAdmissionController(CPU_BASED_ADMISSION_CONTROLLER); + } + + /** + * Handler to trigger registered admissionController + */ + public void applyTransportAdmissionControl(String action) { + this.ADMISSION_CONTROLLERS.forEach((name, admissionController) -> { admissionController.apply(action); }); + } + + /** + * + * @param admissionControllerName admissionControllerName to register into the service. + */ + public void registerAdmissionController(String admissionControllerName) { + AdmissionController admissionController = this.controllerFactory(admissionControllerName); + this.ADMISSION_CONTROLLERS.put(admissionControllerName, admissionController); + } + + /** + * @return AdmissionController Instance + */ + private AdmissionController controllerFactory(String admissionControllerName) { + switch (admissionControllerName) { + case CPU_BASED_ADMISSION_CONTROLLER: + return new CPUBasedAdmissionController(admissionControllerName, this.settings, this.clusterSettings); + default: + throw new IllegalArgumentException("Not Supported AdmissionController : " + admissionControllerName); + } + } + + /** + * + * @return list of the registered admissionControllers + */ + public List getAdmissionControllers() { + return new ArrayList<>(this.ADMISSION_CONTROLLERS.values()); + } + + /** + * + * @param controllerName name of the admissionController + * @return instance of the AdmissionController Instance + */ + public AdmissionController getAdmissionController(String controllerName) { + return this.ADMISSION_CONTROLLERS.getOrDefault(controllerName, null); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java new file mode 100644 index 0000000000000..b557190ab54ac --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +/** + * Settings related to admission control. + * @opensearch.internal + */ +public final class AdmissionControlSettings { + + /** + * Default parameters for the AdmissionControlSettings + */ + public static class Defaults { + public static final String MODE = "disabled"; + } + + /** + * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set + * rejection will be performed, otherwise only rejection metrics will be populated. + */ + public static final Setting ADMISSION_CONTROL_TRANSPORT_LAYER_MODE = new Setting<>( + "admission_control.transport.mode", + Defaults.MODE, + AdmissionControlMode::fromName, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private volatile AdmissionControlMode transportLayeradmissionControlMode; + + /** + * @param clusterSettings clusterSettings Instance + * @param settings settings instance + */ + public AdmissionControlSettings(ClusterSettings clusterSettings, Settings settings) { + this.transportLayeradmissionControlMode = ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.get(settings); + clusterSettings.addSettingsUpdateConsumer(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, this::setAdmissionControlTransportLayerMode); + } + + /** + * + * @param admissionControlMode update the mode of admission control feature + */ + private void setAdmissionControlTransportLayerMode(AdmissionControlMode admissionControlMode) { + this.transportLayeradmissionControlMode = admissionControlMode; + } + + /** + * + * @return return the default mode of the admissionControl + */ + public AdmissionControlMode getAdmissionControlTransportLayerMode() { + return this.transportLayeradmissionControlMode; + } + + /** + * + * @return true based on the admission control feature is enforced else false + */ + public Boolean isTransportLayerAdmissionControlEnforced() { + return this.transportLayeradmissionControlMode == AdmissionControlMode.ENFORCED; + } + + /** + * + * @return true based on the admission control feature is enabled else false + */ + public Boolean isTransportLayerAdmissionControlEnabled() { + return this.transportLayeradmissionControlMode != AdmissionControlMode.DISABLED; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java new file mode 100644 index 0000000000000..00564a9967f31 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Abstract class for Admission Controller in OpenSearch, which aims to provide resource based request admission control. 
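The mode setting drives two independent questions: whether admission control is active at all, and whether it actually rejects rather than only counting. A minimal sketch of that gating, mirroring the isTransportLayerAdmissionControlEnabled/Enforced helpers above (enum and method names hypothetical):

public class ModeGateSketch {
    enum Mode { DISABLED, MONITOR, ENFORCED }

    static boolean enabled(Mode mode) {
        return mode != Mode.DISABLED; // monitor_only and enforced both track rejection metrics
    }

    static boolean enforced(Mode mode) {
        return mode == Mode.ENFORCED; // only enforced actually rejects requests
    }

    public static void main(String[] args) {
        for (Mode mode : Mode.values()) {
            System.out.println(mode + ": enabled=" + enabled(mode) + ", enforced=" + enforced(mode));
        }
    }
}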
+ * It provides methods for any tracking-object that can be incremented (such as memory size), + * and admission control can be applied if configured limit has been reached + */ +public abstract class AdmissionController { + + private final AtomicLong rejectionCount; + private final String admissionControllerName; + + /** + * + * @param rejectionCount initialised rejectionCount value for AdmissionController + * @param admissionControllerName name of the admissionController + */ + public AdmissionController(AtomicLong rejectionCount, String admissionControllerName) { + this.rejectionCount = rejectionCount; + this.admissionControllerName = admissionControllerName; + } + + /** + * Return the current state of the admission controller + * @return true if admissionController is enabled for the transport layer else false + */ + public boolean isEnabledForTransportLayer(AdmissionControlMode admissionControlMode) { + return admissionControlMode != AdmissionControlMode.DISABLED; + } + + /** + * Increment the tracking-objects and apply the admission control if threshold is breached. + * Mostly applicable while applying admission controller + */ + public abstract void apply(String action); + + /** + * @return name of the admission-controller + */ + public String getName() { + return this.admissionControllerName; + } + + /** + * Adds the rejection count for the controller. Primarily used when copying controller states. + * @param count To add the value of the tracking resource object as the provided count + */ + public void addRejectionCount(long count) { + this.rejectionCount.addAndGet(count); + } + + /** + * @return current value of the rejection count metric tracked by the admission-controller. + */ + public long getRejectionCount() { + return this.rejectionCount.get(); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java new file mode 100644 index 0000000000000..3a8956b2cce87 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Class for CPU Based Admission Controller in OpenSearch, which aims to provide CPU utilisation admission control. 
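Each AdmissionController keeps an AtomicLong rejection counter that later rejection logic increments. A standalone sketch of that bookkeeping against a hypothetical CPU limit (names and threshold are illustrative; the real rejection criteria are still marked TODO in this change):

import java.util.concurrent.atomic.AtomicLong;

public class RejectionCounterSketch {
    private final AtomicLong rejectionCount = new AtomicLong(0);

    void apply(long observedCpuPercent, long cpuLimitPercent) {
        // Count a rejection when the tracked resource is over its configured limit.
        if (observedCpuPercent > cpuLimitPercent) {
            rejectionCount.addAndGet(1);
        }
    }

    public static void main(String[] args) {
        RejectionCounterSketch controller = new RejectionCounterSketch();
        controller.apply(97, 95); // over the limit, counted as a rejection
        controller.apply(40, 95); // under the limit, admitted
        System.out.println("rejections = " + controller.rejectionCount.get());
    }
}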
+ * It provides methods to apply admission control if configured limit has been reached + */ +public class CPUBasedAdmissionController extends AdmissionController { + private static final Logger LOGGER = LogManager.getLogger(CPUBasedAdmissionController.class); + public CPUBasedAdmissionControllerSettings settings; + + /** + * + * @param admissionControllerName State of the admission controller + */ + public CPUBasedAdmissionController(String admissionControllerName, Settings settings, ClusterSettings clusterSettings) { + super(new AtomicLong(0), admissionControllerName); + this.settings = new CPUBasedAdmissionControllerSettings(clusterSettings, settings); + } + + /** + * This function will take of applying admission controller based on CPU usage + * @param action is the transport action + */ + @Override + public void apply(String action) { + // TODO Will extend this logic further currently just incrementing rejectionCount + if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) { + this.applyForTransportLayer(action); + } + } + + private void applyForTransportLayer(String actionName) { + // currently incrementing counts to evaluate the controller triggering as expected and using in testing so limiting to 10 + // TODO will update rejection logic further in next PR's + if (this.getRejectionCount() < 10) { + this.addRejectionCount(1); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java new file mode 100644 index 0000000000000..23746cc61a203 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes related to the different admission controllers + */ +package org.opensearch.ratelimitting.admissioncontrol.controllers; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java new file mode 100644 index 0000000000000..2ae2436ba84e7 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import java.util.Locale; + +/** + * Defines the AdmissionControlMode + */ +public enum AdmissionControlMode { + /** + * AdmissionController is completely disabled. + */ + DISABLED("disabled"), + + /** + * AdmissionController only monitors the rejection criteria for the requests. + */ + MONITOR("monitor_only"), + + /** + * AdmissionController monitors and rejects tasks that exceed resource usage thresholds. 
+ */ + ENFORCED("enforced"); + + private final String mode; + + /** + * @param mode update mode of the admission controller + */ + AdmissionControlMode(String mode) { + this.mode = mode; + } + + /** + * + * @return mode of the admission controller + */ + public String getMode() { + return this.mode; + } + + /** + * + * @param name is the mode of the current + * @return Enum of AdmissionControlMode based on the mode + */ + public static AdmissionControlMode fromName(String name) { + switch (name.toLowerCase(Locale.ROOT)) { + case "disabled": + return DISABLED; + case "monitor_only": + return MONITOR; + case "enforced": + return ENFORCED; + default: + throw new IllegalArgumentException("Invalid AdmissionControlMode: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java new file mode 100644 index 0000000000000..f2fdca0cfe49b --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import java.util.Locale; + +/** + * Enums that defines the type of the transport requests + */ +public enum TransportActionType { + INDEXING("indexing"), + SEARCH("search"); + + private final String type; + + TransportActionType(String uriType) { + this.type = uriType; + } + + /** + * + * @return type of the request + */ + public String getType() { + return type; + } + + public static TransportActionType fromName(String name) { + name = name.toLowerCase(Locale.ROOT); + switch (name) { + case "indexing": + return INDEXING; + case "search": + return SEARCH; + default: + throw new IllegalArgumentException("Not Supported TransportAction Type: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java new file mode 100644 index 0000000000000..98b08ebd0a7bf --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains enums related to the different admission controller feature + */ +package org.opensearch.ratelimitting.admissioncontrol.enums; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java new file mode 100644 index 0000000000000..b3dc229f86fb6 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
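Both enums parse their setting names case-insensitively via Locale.ROOT and fail fast on unknown values. A standalone sketch of that parsing shape (hypothetical enum, same structure as AdmissionControlMode#fromName):

import java.util.Locale;

public class ModeParsingSketch {
    enum Mode { DISABLED, MONITOR, ENFORCED }

    static Mode fromName(String name) {
        switch (name.toLowerCase(Locale.ROOT)) {
            case "disabled":
                return Mode.DISABLED;
            case "monitor_only":
                return Mode.MONITOR;
            case "enforced":
                return Mode.ENFORCED;
            default:
                throw new IllegalArgumentException("Invalid mode: " + name);
        }
    }

    public static void main(String[] args) {
        System.out.println(fromName("Monitor_Only")); // MONITOR, case-insensitive via Locale.ROOT
        System.out.println(fromName("enforced"));     // ENFORCED
    }
}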
+ */ + +/** + * This package contains base classes needed for the admissionController Feature + */ +package org.opensearch.ratelimitting.admissioncontrol; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java new file mode 100644 index 0000000000000..141e9b68db145 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java @@ -0,0 +1,110 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +import java.util.Arrays; +import java.util.List; + +/** + * Settings related to cpu based admission controller. + * @opensearch.internal + */ +public class CPUBasedAdmissionControllerSettings { + public static final String CPU_BASED_ADMISSION_CONTROLLER = "global_cpu_usage"; + + /** + * Default parameters for the CPUBasedAdmissionControllerSettings + */ + public static class Defaults { + public static final long CPU_USAGE = 95; + public static List TRANSPORT_LAYER_DEFAULT_URI_TYPE = Arrays.asList("indexing", "search"); + } + + private AdmissionControlMode transportLayerMode; + private Long searchCPULimit; + private Long indexingCPULimit; + + private final List transportActionsList; + /** + * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set + * rejection will be performed, otherwise only rejection metrics will be populated. 
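The mode_override setting defaults to the global admission_control.transport.mode, so a controller only diverges from the global mode when explicitly overridden. A rough sketch of that fallback resolution (names hypothetical):

import java.util.Optional;

public class ModeOverrideSketch {
    enum Mode { DISABLED, MONITOR, ENFORCED }

    // When no per-controller override is set, the controller follows the global transport-layer mode.
    static Mode resolve(Optional<Mode> controllerOverride, Mode globalMode) {
        return controllerOverride.orElse(globalMode);
    }

    public static void main(String[] args) {
        System.out.println(resolve(Optional.empty(), Mode.MONITOR));           // MONITOR, falls back to global
        System.out.println(resolve(Optional.of(Mode.ENFORCED), Mode.MONITOR)); // ENFORCED, override wins
    }
}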
+ */ + public static final Setting<AdmissionControlMode> CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE = new Setting<>( + "admission_control.transport.cpu_usage.mode_override", + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + AdmissionControlMode::fromName, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting is used to set the CPU limit for search requests; by default it uses the default CPU usage limit + */ + public static final Setting<Long> SEARCH_CPU_USAGE_LIMIT = Setting.longSetting( + "admission_control.search.cpu_usage.limit", + Defaults.CPU_USAGE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting is used to set the CPU limit for indexing requests; by default it uses the default CPU usage limit + */ + public static final Setting<Long> INDEXING_CPU_USAGE_LIMIT = Setting.longSetting( + "admission_control.indexing.cpu_usage.limit", + Defaults.CPU_USAGE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + // currently limited to one setting; more settings will be added in follow-up PRs + public CPUBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { + this.transportLayerMode = CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); + clusterSettings.addSettingsUpdateConsumer(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); + this.searchCPULimit = SEARCH_CPU_USAGE_LIMIT.get(settings); + this.indexingCPULimit = INDEXING_CPU_USAGE_LIMIT.get(settings); + this.transportActionsList = Defaults.TRANSPORT_LAYER_DEFAULT_URI_TYPE; + clusterSettings.addSettingsUpdateConsumer(INDEXING_CPU_USAGE_LIMIT, this::setIndexingCPULimit); + clusterSettings.addSettingsUpdateConsumer(SEARCH_CPU_USAGE_LIMIT, this::setSearchCPULimit); + } + + private void setTransportLayerMode(AdmissionControlMode admissionControlMode) { + this.transportLayerMode = admissionControlMode; + } + + public AdmissionControlMode getTransportLayerAdmissionControllerMode() { + return transportLayerMode; + } + + public Long getSearchCPULimit() { + return searchCPULimit; + } + + public Long getIndexingCPULimit() { + return indexingCPULimit; + } + + public void setIndexingCPULimit(Long indexingCPULimit) { + this.indexingCPULimit = indexingCPULimit; + } + + public void setSearchCPULimit(Long searchCPULimit) { + this.searchCPULimit = searchCPULimit; + } + + public List<String> getTransportActionsList() { + return transportActionsList; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java new file mode 100644 index 0000000000000..a024ccc756745 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ +/** + * This package contains settings related classes for the different admission controllers + */ +package org.opensearch.ratelimitting.admissioncontrol.settings; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java new file mode 100644 index 0000000000000..7d0f5fbc17a51 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +/** + * AdmissionControl Handler to intercept Transport Requests. + * @param Transport Request + */ +public class AdmissionControlTransportHandler implements TransportRequestHandler { + + private final String action; + private final TransportRequestHandler actualHandler; + protected final Logger log = LogManager.getLogger(this.getClass()); + AdmissionControlService admissionControlService; + boolean forceExecution; + + public AdmissionControlTransportHandler( + String action, + TransportRequestHandler actualHandler, + AdmissionControlService admissionControlService, + boolean forceExecution + ) { + super(); + this.action = action; + this.actualHandler = actualHandler; + this.admissionControlService = admissionControlService; + this.forceExecution = forceExecution; + } + + /** + * @param request Transport Request that landed on the node + * @param channel Transport channel allows to send a response to a request + * @param task Current task that is executing + * @throws Exception when admission control rejected the requests + */ + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + // intercept all the transport requests here and apply admission control + try { + // TODO Need to evaluate if we need to apply admission control or not if force Execution is true will update in next PR. 
+ this.admissionControlService.applyTransportAdmissionControl(this.action); + } catch (final OpenSearchRejectedExecutionException openSearchRejectedExecutionException) { + log.warn(openSearchRejectedExecutionException.getMessage()); + channel.sendResponse(openSearchRejectedExecutionException); + } catch (final Exception e) { + throw e; + } + actualHandler.messageReceived(request, channel, task); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java new file mode 100644 index 0000000000000..01cfcbd780006 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.transport.TransportInterceptor; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +/** + * This class allows throttling to intercept requests on both the sender and the receiver side. + */ +public class AdmissionControlTransportInterceptor implements TransportInterceptor { + + AdmissionControlService admissionControlService; + + public AdmissionControlTransportInterceptor(AdmissionControlService admissionControlService) { + this.admissionControlService = admissionControlService; + } + + /** + * + * @return admissionController handler to intercept transport requests + */ + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + return new AdmissionControlTransportHandler<>(action, actualHandler, this.admissionControlService, forceExecution); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java new file mode 100644 index 0000000000000..f97f31bc7b1db --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** + * This package contains transport related classes for the admissionController Feature + */ +package org.opensearch.ratelimitting.admissioncontrol.transport; diff --git a/server/src/main/java/org/opensearch/ratelimitting/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/package-info.java new file mode 100644 index 0000000000000..c04358e14284f --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Base OpenSearch Throttling package + */ +package org.opensearch.ratelimitting; diff --git a/server/src/main/java/org/opensearch/repositories/IndexId.java b/server/src/main/java/org/opensearch/repositories/IndexId.java index 0b0644ce932e5..d88eeb5aa9c49 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexId.java +++ b/server/src/main/java/org/opensearch/repositories/IndexId.java @@ -80,7 +80,7 @@ public String getName() { * The unique ID for the index within the repository. This is *not* the same as the * index's UUID, but merely a unique file/URL friendly identifier that a repository can * use to name blobs for the index. - * + *
<p>
                          * We could not use the index's actual UUID (See {@link Index#getUUID()}) because in the * case of snapshot/restore, the index UUID in the snapshotted index will be different * from the index UUID assigned to it when it is restored. Hence, the actual index UUID diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 3d6679b3ef80e..68669feb16abc 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -84,6 +84,7 @@ import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. @@ -154,7 +155,7 @@ public RepositoriesService( } /** - * Registers new repository in the cluster + * Registers new repository or updates an existing repository in the cluster *
<p>
                          * This method can be only called on the cluster-manager node. It tries to create a new repository on the master * and if it was successful it adds new repository to cluster metadata. @@ -162,7 +163,7 @@ public RepositoriesService( * @param request register repository request * @param listener register repository listener */ - public void registerRepository(final PutRepositoryRequest request, final ActionListener listener) { + public void registerOrUpdateRepository(final PutRepositoryRequest request, final ActionListener listener) { assert lifecycle.started() : "Trying to register new repository but service is in state [" + lifecycle.state() + "]"; final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( @@ -236,14 +237,30 @@ public ClusterState execute(ClusterState currentState) { List repositoriesMetadata = new ArrayList<>(repositories.repositories().size() + 1); for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { - if (repositoryMetadata.name().equals(newRepositoryMetadata.name())) { - if (newRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) { + RepositoryMetadata updatedRepositoryMetadata = newRepositoryMetadata; + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + Settings updatedSettings = Settings.builder() + .put(newRepositoryMetadata.settings()) + .put(SYSTEM_REPOSITORY_SETTING.getKey(), true) + .build(); + updatedRepositoryMetadata = new RepositoryMetadata( + newRepositoryMetadata.name(), + newRepositoryMetadata.type(), + updatedSettings, + newRepositoryMetadata.cryptoMetadata() + ); + } + if (repositoryMetadata.name().equals(updatedRepositoryMetadata.name())) { + if (updatedRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) { // Previous version is the same as this one no update is needed. 
return currentState; } ensureCryptoSettingsAreSame(repositoryMetadata, request); found = true; - repositoriesMetadata.add(newRepositoryMetadata); + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + ensureValidSystemRepositoryUpdate(updatedRepositoryMetadata, repositoryMetadata); + } + repositoriesMetadata.add(updatedRepositoryMetadata); } else { repositoriesMetadata.add(repositoryMetadata); } @@ -315,6 +332,7 @@ public ClusterState execute(ClusterState currentState) { for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { if (Regex.simpleMatch(request.name(), repositoryMetadata.name())) { ensureRepositoryNotInUse(currentState, repositoryMetadata.name()); + ensureNotSystemRepository(repositoryMetadata); logger.info("delete repository [{}]", repositoryMetadata.name()); changed = true; } else { @@ -439,7 +457,6 @@ public void applyClusterState(ClusterChangedEvent event) { logger.debug("unregistering repository [{}]", entry.getKey()); Repository repository = entry.getValue(); closeRepository(repository); - archiveRepositoryStats(repository, state.version()); } else { survivors.put(entry.getKey(), entry.getValue()); } @@ -456,19 +473,28 @@ public void applyClusterState(ClusterChangedEvent event) { if (previousMetadata.type().equals(repositoryMetadata.type()) == false || previousMetadata.settings().equals(repositoryMetadata.settings()) == false) { // Previous version is different from the version in settings - logger.debug("updating repository [{}]", repositoryMetadata.name()); - closeRepository(repository); - archiveRepositoryStats(repository, state.version()); - repository = null; - try { - repository = createRepository(repositoryMetadata, typesRegistry); - } catch (RepositoryException ex) { - // TODO: this catch is bogus, it means the old repo is already closed, - // but we have nothing to replace it - logger.warn( - () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), - ex + if (repository.isSystemRepository() && repository.isReloadable()) { + logger.debug( + "updating repository [{}] in-place to use new metadata [{}]", + repositoryMetadata.name(), + repositoryMetadata ); + repository.validateMetadata(repositoryMetadata); + repository.reload(repositoryMetadata); + } else { + logger.debug("updating repository [{}]", repositoryMetadata.name()); + closeRepository(repository); + repository = null; + try { + repository = createRepository(repositoryMetadata, typesRegistry); + } catch (RepositoryException ex) { + // TODO: this catch is bogus, it means the old repo is already closed, + // but we have nothing to replace it + logger.warn( + () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), + ex + ); + } } } } else { @@ -547,12 +573,12 @@ public Repository repository(String repositoryName) { } public List repositoriesStats() { - List archivedRepoStats = repositoriesStatsArchive.getArchivedStats(); List activeRepoStats = getRepositoryStatsForActiveRepositories(); + return activeRepoStats; + } - List repositoriesStats = new ArrayList<>(archivedRepoStats); - repositoriesStats.addAll(activeRepoStats); - return repositoriesStats; + public RepositoriesStats getRepositoriesStats() { + return new RepositoriesStats(repositoriesStats()); } private List getRepositoryStatsForActiveRepositories() { @@ -612,15 +638,6 @@ public void closeRepository(Repository repository) { repository.close(); } - private void archiveRepositoryStats(Repository repository, long clusterStateVersion) { - if 
(repository instanceof MeteredBlobStoreRepository) { - RepositoryStatsSnapshot stats = ((MeteredBlobStoreRepository) repository).statsSnapshotForArchival(clusterStateVersion); - if (repositoriesStatsArchive.archive(stats) == false) { - logger.warn("Unable to archive the repository stats [{}] as the archive is full.", stats); - } - } - } - /** * Creates repository holder. This method starts the non-internal repository */ @@ -682,6 +699,15 @@ public static void validateRepositoryMetadataSettings( + minVersionInCluster ); } + // Validation to not allow users to create system repository via put repository call. + if (isSystemRepositorySettingPresent(repositoryMetadataSettings)) { + throw new RepositoryException( + repositoryName, + "setting " + + SYSTEM_REPOSITORY_SETTING.getKey() + + " cannot provide system repository setting; this setting is managed by OpenSearch" + ); + } } private static void ensureRepositoryNotInUse(ClusterState clusterState, String repository) { @@ -756,6 +782,65 @@ public void updateRepositoriesMap(Map repos) { } } + private static void ensureNotSystemRepository(RepositoryMetadata repositoryMetadata) { + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + throw new RepositoryException(repositoryMetadata.name(), "cannot delete a system repository"); + } + } + + private static boolean isSystemRepositorySettingPresent(Settings repositoryMetadataSettings) { + return SYSTEM_REPOSITORY_SETTING.get(repositoryMetadataSettings); + } + + private static boolean isValueEqual(String key, String newValue, String currentValue) { + if (newValue == null && currentValue == null) { + return true; + } + if (newValue == null) { + throw new IllegalArgumentException("[" + key + "] cannot be empty, " + "current value [" + currentValue + "]"); + } + if (newValue.equals(currentValue) == false) { + throw new IllegalArgumentException( + "trying to modify an unmodifiable attribute " + + key + + " of system repository from " + + "current value [" + + currentValue + + "] to new value [" + + newValue + + "]" + ); + } + return true; + } + + public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMetadata, RepositoryMetadata currentRepositoryMetadata) { + if (isSystemRepositorySettingPresent(currentRepositoryMetadata.settings())) { + try { + isValueEqual("type", newRepositoryMetadata.type(), currentRepositoryMetadata.type()); + + Repository repository = repositories.get(currentRepositoryMetadata.name()); + Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings(); + Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings(); + + List restrictedSettings = repository.getRestrictedSystemRepositorySettings() + .stream() + .map(setting -> setting.getKey()) + .collect(Collectors.toList()); + + for (String restrictedSettingKey : restrictedSettings) { + isValueEqual( + restrictedSettingKey, + newRepositoryMetadataSettings.get(restrictedSettingKey), + currentRepositoryMetadataSettings.get(restrictedSettingKey) + ); + } + } catch (IllegalArgumentException e) { + throw new RepositoryException(currentRepositoryMetadata.name(), e.getMessage()); + } + } + } + @Override protected void doStart() { diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java new file mode 100644 index 0000000000000..b24e0dddd852a --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java @@ -0,0 +1,52 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Encapsulates stats for multiple repositories* + */ +public class RepositoriesStats implements Writeable, ToXContentObject { + + List repositoryStatsSnapshots; + + public RepositoriesStats(List repositoryStatsSnapshots) { + this.repositoryStatsSnapshots = repositoryStatsSnapshots; + } + + public RepositoriesStats(StreamInput in) throws IOException { + this.repositoryStatsSnapshots = in.readList(RepositoryStatsSnapshot::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(repositoryStatsSnapshots); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("repositories"); + if (CollectionUtils.isEmpty(repositoryStatsSnapshots) == false) { + for (RepositoryStatsSnapshot repositoryStatsSnapshot : repositoryStatsSnapshots) { + repositoryStatsSnapshot.toXContent(builder, params); + } + } + builder.endArray(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java index b8f100706f81e..3d35f75176eaf 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java @@ -70,11 +70,6 @@ public RepositoriesStatsArchive(TimeValue retentionPeriod, int maxCapacity, Long * @return {@code true} if the repository stats were archived, {@code false} otherwise. 
*/ synchronized boolean archive(final RepositoryStatsSnapshot repositoryStats) { - assert containsRepositoryStats(repositoryStats) == false : "A repository with ephemeral id " - + repositoryStats.getRepositoryInfo().ephemeralId - + " is already archived"; - assert repositoryStats.isArchived(); - evict(); if (archive.size() >= maxCapacity) { @@ -116,15 +111,6 @@ private void evict() { } } - private boolean containsRepositoryStats(RepositoryStatsSnapshot repositoryStats) { - return archive.stream() - .anyMatch( - entry -> entry.repositoryStatsSnapshot.getRepositoryInfo().ephemeralId.equals( - repositoryStats.getRepositoryInfo().ephemeralId - ) - ); - } - private static class ArchiveEntry { private final RepositoryStatsSnapshot repositoryStatsSnapshot; private final long createdAtMillis; diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 10f3dc2b6b340..74cda9bcc3393 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.common.settings.Setting; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.MapperService; @@ -55,6 +56,8 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -342,6 +345,14 @@ void restoreShard( ActionListener listener ); + /** + * Returns the list of restricted system repository settings that cannot be mutated post repository creation. + * @return list of settings + */ + default List> getRestrictedSystemRepositorySettings() { + return Collections.emptyList(); + } + /** * Returns Snapshot Shard Metadata for remote store interop enabled snapshot. *

                          @@ -440,4 +451,22 @@ default void cloneRemoteStoreIndexShardSnapshot( default Map adaptUserMetadata(Map userMetadata) { return userMetadata; } + + /** + * Checks if the repository can be reloaded inplace or not + * @return true if the repository can be reloaded inplace, false otherwise + */ + default boolean isReloadable() { + return false; + } + + /** + * Reload the repository inplace + */ + default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Validate the repository metadata + */ + default void validateMetadata(RepositoryMetadata repositoryMetadata) {} } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java index 8aa86fc46d591..387a685bd6526 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -51,64 +50,27 @@ * @opensearch.internal */ public final class RepositoryInfo implements Writeable, ToXContentFragment { - public final String ephemeralId; public final String name; public final String type; public final Map location; - public final long startedAt; - @Nullable - public final Long stoppedAt; - public RepositoryInfo(String ephemeralId, String name, String type, Map location, long startedAt) { - this(ephemeralId, name, type, location, startedAt, null); - } - - public RepositoryInfo( - String ephemeralId, - String name, - String type, - Map location, - long startedAt, - @Nullable Long stoppedAt - ) { - this.ephemeralId = ephemeralId; + public RepositoryInfo(String name, String type, Map location) { this.name = name; this.type = type; this.location = location; - this.startedAt = startedAt; - if (stoppedAt != null && startedAt > stoppedAt) { - throw new IllegalArgumentException("createdAt must be before or equal to stoppedAt"); - } - this.stoppedAt = stoppedAt; } public RepositoryInfo(StreamInput in) throws IOException { - this.ephemeralId = in.readString(); this.name = in.readString(); this.type = in.readString(); this.location = in.readMap(StreamInput::readString, StreamInput::readString); - this.startedAt = in.readLong(); - this.stoppedAt = in.readOptionalLong(); - } - - public RepositoryInfo stopped(long stoppedAt) { - assert isStopped() == false : "The repository is already stopped"; - - return new RepositoryInfo(ephemeralId, name, type, location, startedAt, stoppedAt); - } - - public boolean isStopped() { - return stoppedAt != null; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(ephemeralId); out.writeString(name); out.writeString(type); out.writeMap(location, StreamOutput::writeString, StreamOutput::writeString); - out.writeLong(startedAt); - out.writeOptionalLong(stoppedAt); } @Override @@ -116,11 +78,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("repository_name", name); builder.field("repository_type", type); builder.field("repository_location", location); - builder.field("repository_ephemeral_id", ephemeralId); - builder.field("repository_started_at", startedAt); - if (stoppedAt != null) { - builder.field("repository_stopped_at", stoppedAt); - } return builder; } @@ -129,17 +86,12 @@ 
public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryInfo that = (RepositoryInfo) o; - return ephemeralId.equals(that.ephemeralId) - && name.equals(that.name) - && type.equals(that.type) - && location.equals(that.location) - && startedAt == that.startedAt - && Objects.equals(stoppedAt, that.stoppedAt); + return name.equals(that.name) && type.equals(that.type) && location.equals(that.location); } @Override public int hashCode() { - return Objects.hash(ephemeralId, name, type, location, startedAt, stoppedAt); + return Objects.hash(name, type, location); } @Override diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java index efd5d6f8560b6..ab97c5eaa1f7a 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java @@ -32,9 +32,13 @@ package org.opensearch.repositories; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -47,32 +51,63 @@ * * @opensearch.internal */ -public class RepositoryStats implements Writeable { +public class RepositoryStats implements Writeable, ToXContentFragment { public static final RepositoryStats EMPTY_STATS = new RepositoryStats(Collections.emptyMap()); + @Nullable public final Map requestCounts; + @Nullable + public final Map> extendedStats; + public final boolean detailed; public RepositoryStats(Map requestCounts) { this.requestCounts = Collections.unmodifiableMap(requestCounts); + this.extendedStats = Collections.emptyMap(); + this.detailed = false; + } + + public RepositoryStats(Map> extendedStats, boolean detailed) { + this.requestCounts = Collections.emptyMap(); + this.extendedStats = Collections.unmodifiableMap(extendedStats); + this.detailed = detailed; } public RepositoryStats(StreamInput in) throws IOException { this.requestCounts = in.readMap(StreamInput::readString, StreamInput::readLong); + this.extendedStats = in.readMap( + e -> e.readEnum(BlobStore.Metric.class), + i -> i.readMap(StreamInput::readString, StreamInput::readLong) + ); + this.detailed = in.readBoolean(); } public RepositoryStats merge(RepositoryStats otherStats) { - final Map result = new HashMap<>(); - result.putAll(requestCounts); - for (Map.Entry entry : otherStats.requestCounts.entrySet()) { - result.merge(entry.getKey(), entry.getValue(), Math::addExact); + assert this.detailed == otherStats.detailed; + if (detailed) { + final Map> result = new HashMap<>(); + result.putAll(extendedStats); + for (Map.Entry> entry : otherStats.extendedStats.entrySet()) { + for (Map.Entry nested : entry.getValue().entrySet()) { + result.get(entry.getKey()).merge(nested.getKey(), nested.getValue(), Math::addExact); + } + } + return new RepositoryStats(result, true); + } else { + final Map result = new HashMap<>(); + result.putAll(requestCounts); + for (Map.Entry entry : otherStats.requestCounts.entrySet()) { + result.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + return new RepositoryStats(result); } - return new RepositoryStats(result); } 
@Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(requestCounts, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(extendedStats, StreamOutput::writeEnum, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeLong)); + out.writeBoolean(detailed); } @Override @@ -80,16 +115,32 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryStats that = (RepositoryStats) o; - return requestCounts.equals(that.requestCounts); + return requestCounts.equals(that.requestCounts) && extendedStats.equals(that.extendedStats) && detailed == that.detailed; } @Override public int hashCode() { - return Objects.hash(requestCounts); + return Objects.hash(requestCounts, detailed, extendedStats); } @Override public String toString() { - return "RepositoryStats{" + "requestCounts=" + requestCounts + '}'; + return "RepositoryStats{" + "requestCounts=" + requestCounts + "extendedStats=" + extendedStats + "detailed =" + detailed + "}"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (detailed == false) { + builder.field("request_counts", requestCounts); + } else { + extendedStats.forEach((k, v) -> { + try { + builder.field(k.metricName(), v); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + return builder; } } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java index 2b061cd2c2cc9..0a727980fad0d 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java @@ -53,21 +53,17 @@ public final class RepositoryStatsSnapshot implements Writeable, ToXContentObjec private final RepositoryInfo repositoryInfo; private final RepositoryStats repositoryStats; private final long clusterVersion; - private final boolean archived; - public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion, boolean archived) { - assert archived != (clusterVersion == UNKNOWN_CLUSTER_VERSION); + public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion) { this.repositoryInfo = repositoryInfo; this.repositoryStats = repositoryStats; this.clusterVersion = clusterVersion; - this.archived = archived; } public RepositoryStatsSnapshot(StreamInput in) throws IOException { this.repositoryInfo = new RepositoryInfo(in); this.repositoryStats = new RepositoryStats(in); this.clusterVersion = in.readLong(); - this.archived = in.readBoolean(); } public RepositoryInfo getRepositoryInfo() { @@ -78,10 +74,6 @@ public RepositoryStats getRepositoryStats() { return repositoryStats; } - public boolean isArchived() { - return archived; - } - public long getClusterVersion() { return clusterVersion; } @@ -91,18 +83,13 @@ public void writeTo(StreamOutput out) throws IOException { repositoryInfo.writeTo(out); repositoryStats.writeTo(out); out.writeLong(clusterVersion); - out.writeBoolean(archived); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); repositoryInfo.toXContent(builder, params); - builder.field("request_counts", repositoryStats.requestCounts); - builder.field("archived", archived); - if (archived) { - 
builder.field("cluster_version", clusterVersion); - } + repositoryStats.toXContent(builder, params); builder.endObject(); return builder; } @@ -114,13 +101,12 @@ public boolean equals(Object o) { RepositoryStatsSnapshot that = (RepositoryStatsSnapshot) o; return repositoryInfo.equals(that.repositoryInfo) && repositoryStats.equals(that.repositoryStats) - && clusterVersion == that.clusterVersion - && archived == that.archived; + && clusterVersion == that.clusterVersion; } @Override public int hashCode() { - return Objects.hash(repositoryInfo, repositoryStats, clusterVersion, archived); + return Objects.hash(repositoryInfo, repositoryStats, clusterVersion); } @Override diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 490ebda24bf60..8a2260e1f6d90 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -149,6 +149,7 @@ import java.io.InputStream; import java.nio.file.NoSuchFileException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -172,6 +173,7 @@ import java.util.stream.Stream; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; +import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; /** * BlobStore - based implementation of Snapshot Repository @@ -296,21 +298,21 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.NodeScope ); - protected final boolean supportURLRepo; + protected volatile boolean supportURLRepo; - private final int maxShardBlobDeleteBatch; + private volatile int maxShardBlobDeleteBatch; - private final Compressor compressor; + private volatile Compressor compressor; - private final boolean cacheRepositoryData; + private volatile boolean cacheRepositoryData; - private final RateLimiter snapshotRateLimiter; + private volatile RateLimiter snapshotRateLimiter; - private final RateLimiter restoreRateLimiter; + private volatile RateLimiter restoreRateLimiter; - private final RateLimiter remoteUploadRateLimiter; + private volatile RateLimiter remoteUploadRateLimiter; - private final RateLimiter remoteDownloadRateLimiter; + private volatile RateLimiter remoteDownloadRateLimiter; private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric(); @@ -355,7 +357,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobStoreIndexShardSnapshots::fromXContent ); - private final boolean readOnly; + private volatile boolean readOnly; private final boolean isSystemRepository; @@ -365,7 +367,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce blobStore = new SetOnce<>(); - private final ClusterService clusterService; + protected final ClusterService clusterService; private final RecoverySettings recoverySettings; @@ -399,36 +401,54 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp /** * IO buffer size hint for reading and writing to the underlying blob store. 
*/ - protected final int bufferSize; + protected volatile int bufferSize; /** * Constructs new BlobStoreRepository - * @param metadata The metadata for this repository including name and settings + * @param repositoryMetadata The metadata for this repository including name and settings * @param clusterService ClusterService */ protected BlobStoreRepository( - final RepositoryMetadata metadata, - final boolean compress, + final RepositoryMetadata repositoryMetadata, final NamedXContentRegistry namedXContentRegistry, final ClusterService clusterService, final RecoverySettings recoverySettings ) { - this.metadata = metadata; + // Read RepositoryMetadata as the first step + readRepositoryMetadata(repositoryMetadata); + + isSystemRepository = SYSTEM_REPOSITORY_SETTING.get(metadata.settings()); this.namedXContentRegistry = namedXContentRegistry; this.threadPool = clusterService.getClusterApplierService().threadPool(); this.clusterService = clusterService; this.recoverySettings = recoverySettings; - this.supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); + } + + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + readRepositoryMetadata(repositoryMetadata); + } + + /** + * Reloads the values derived from the Repository Metadata + * + * @param repositoryMetadata RepositoryMetadata instance to derive the values from + */ + private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) { + this.metadata = repositoryMetadata; + + supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", ByteSizeValue.ZERO); remoteUploadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_upload_bytes_per_sec", ByteSizeValue.ZERO); remoteDownloadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_download_bytes_per_sec", ByteSizeValue.ZERO); readOnly = READONLY_SETTING.get(metadata.settings()); - isSystemRepository = SYSTEM_REPOSITORY_SETTING.get(metadata.settings()); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); maxShardBlobDeleteBatch = MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.get(metadata.settings()); - this.compressor = compress ? COMPRESSION_TYPE_SETTING.get(metadata.settings()) : CompressorRegistry.none(); + compressor = COMPRESS_SETTING.get(metadata.settings()) + ? COMPRESSION_TYPE_SETTING.get(metadata.settings()) + : CompressorRegistry.none(); } @Override @@ -831,6 +851,8 @@ public RepositoryStats stats() { final BlobStore store = blobStore.get(); if (store == null) { return RepositoryStats.EMPTY_STATS; + } else if (store.extendedStats() != null && store.extendedStats().isEmpty() == false) { + return new RepositoryStats(store.extendedStats(), true); } return new RepositoryStats(store.stats()); } @@ -2528,7 +2550,7 @@ private RepositoryMetadata getRepoMetadata(ClusterState state) { * the next version number from when the index blob was written. Each individual index-N blob is * only written once and never overwritten. The highest numbered index-N blob is the latest one * that contains the current snapshots in the repository. - * + *
<p>
                          * Package private for testing */ long latestIndexBlobId() throws IOException { @@ -3138,6 +3160,11 @@ public InputStream maybeRateLimitSnapshots(InputStream stream) { return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT); } + @Override + public List> getRestrictedSystemRepositorySettings() { + return Arrays.asList(SYSTEM_REPOSITORY_SETTING, READONLY_SETTING, REMOTE_STORE_INDEX_SHALLOW_COPY); + } + @Override public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( SnapshotId snapshotId, @@ -3309,7 +3336,12 @@ private void writeShardIndexBlobAtomic( () -> new ParameterizedMessage("[{}] Writing shard index [{}] to [{}]", metadata.name(), indexGeneration, shardContainer.path()) ); final String blobName = INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(String.valueOf(indexGeneration)); - writeAtomic(shardContainer, blobName, INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor), true); + writeAtomic( + shardContainer, + blobName, + INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS), + true + ); } // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 7e1960171043a..3e6052a5ef820 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -83,7 +83,7 @@ public final class ChecksumBlobStoreFormat { // Serialization parameters to specify correct context for metadata serialization - private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + public static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; static { Map snapshotOnlyParams = new HashMap<>(); @@ -170,36 +170,94 @@ public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistr * @param compressor whether to use compression */ public void write(final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor) throws IOException { + write(obj, blobContainer, name, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS); + } + + /** + * Writes blob with resolving the blob name using {@link #blobName} method. + *
<p>
                          + * The blob will optionally by compressed. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param params ToXContent params + */ + public void write( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final ToXContent.Params params + ) throws IOException { final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor); + final BytesReference bytes = serialize(obj, blobName, compressor, params); blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); } /** - * Writes blob with resolving the blob name using {@link #blobName} method. - * Leverages the multipart upload if supported by the blobContainer. + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#NORMAL} + */ + public void writeAsync( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + // use NORMAL priority by default + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.NORMAL, listener, params); + } + + /** + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#URGENT} + *
<p>
                          + * NOTE: We use this method to upload urgent priority objects like cluster state to remote stores. + * Use {@link #writeAsync(ToXContent, BlobContainer, String, Compressor, ActionListener, ToXContent.Params)} for + * other use cases. + */ + public void writeAsyncWithUrgentPriority( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.URGENT, listener, params); + } + + /** + * Method to writes blob with resolving the blob name using {@link #blobName} method with specified + * {@link WritePriority}. Leverages the multipart upload if supported by the blobContainer. * * @param obj object to be serialized * @param blobContainer blob container * @param name blob name * @param compressor whether to use compression + * @param priority write priority to be used * @param listener listener to listen to write result + * @param params ToXContent params */ - public void writeAsync( + private void writeAsyncWithPriority( final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor, - ActionListener listener + final WritePriority priority, + ActionListener listener, + final ToXContent.Params params ) throws IOException { if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { - write(obj, blobContainer, name, compressor); + write(obj, blobContainer, name, compressor, params); listener.onResponse(null); return; } final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor); - final String resourceDescription = "ChecksumBlobStoreFormat.writeAsync(blob=\"" + blobName + "\")"; + final BytesReference bytes = serialize(obj, blobName, compressor, params); + final String resourceDescription = "ChecksumBlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")"; try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { long expectedChecksum; try { @@ -219,7 +277,7 @@ public void writeAsync( blobName, bytes.length(), true, - WritePriority.HIGH, + priority, (size, position) -> new OffsetRangeIndexInputStream(input, size, position), expectedChecksum, ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() @@ -230,7 +288,8 @@ public void writeAsync( } } - public BytesReference serialize(final T obj, final String blobName, final Compressor compressor) throws IOException { + public BytesReference serialize(final T obj, final String blobName, final Compressor compressor, final ToXContent.Params params) + throws IOException { try (BytesStreamOutput outputStream = new BytesStreamOutput()) { try ( OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( @@ -254,7 +313,7 @@ public void close() throws IOException { ) ) { builder.startObject(); - obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + obj.toXContent(builder, params); builder.endObject(); } CodecUtil.writeFooter(indexOutput); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java index 54f226e81025e..0651ff586d412 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ 
b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -34,12 +34,10 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.UUIDs; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryInfo; import org.opensearch.repositories.RepositoryStatsSnapshot; -import org.opensearch.threadpool.ThreadPool; import java.util.Map; @@ -53,29 +51,24 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository { public MeteredBlobStoreRepository( RepositoryMetadata metadata, - boolean compress, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings, Map location ) { - super(metadata, compress, namedXContentRegistry, clusterService, recoverySettings); - ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); - this.repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), - metadata.name(), - metadata.type(), - location, - threadPool.absoluteTimeInMillis() - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); + this.repositoryInfo = new RepositoryInfo(metadata.name(), metadata.type(), location); } - public RepositoryStatsSnapshot statsSnapshot() { - return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION, false); + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + super.reload(repositoryMetadata); + + // Not adding any additional reload logic here is intentional as the constructor only + // initializes the repositoryInfo from the repo metadata, which cannot be changed. } - public RepositoryStatsSnapshot statsSnapshotForArchival(long clusterVersion) { - RepositoryInfo stoppedRepoInfo = repositoryInfo.stopped(threadPool.absoluteTimeInMillis()); - return new RepositoryStatsSnapshot(stoppedRepoInfo, stats(), clusterVersion, true); + public RepositoryStatsSnapshot statsSnapshot() { + return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION); } } diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java index 3009466f03635..4a9a91336ec1d 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java @@ -50,6 +50,8 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; import java.util.function.Function; /** @@ -61,7 +63,6 @@ *

<dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write stream (per repository on each node). Defaults to 5.</dd>
 * <dt>{@code chunk_size}</dt><dd>Large file can be divided into chunks. This parameter specifies the chunk size.
 * Defaults to not chucked.</dd>
- * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed. Defaults to false.</dd>
                          * * * @opensearch.internal @@ -101,11 +102,11 @@ public class FsRepository extends BlobStoreRepository { public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path"); - private final Environment environment; + protected final Environment environment; - private ByteSizeValue chunkSize; + protected ByteSizeValue chunkSize; - private final BlobPath basePath; + protected BlobPath basePath; /** * Constructs a shared file system repository. @@ -117,8 +118,27 @@ public FsRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - super(metadata, calculateCompress(metadata, environment), namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; + validateLocation(); + readMetadata(); + } + + protected void readMetadata() { + if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + } else { + this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); + } + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + } + + protected void validateLocation() { String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { logger.warn( @@ -151,24 +171,6 @@ public FsRepository( ); } } - - if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - } else { - this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); - } - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - } - - private static boolean calculateCompress(RepositoryMetadata metadata, Environment environment) { - return COMPRESS_SETTING.exists(metadata.settings()) - ? COMPRESS_SETTING.get(metadata.settings()) - : REPOSITORIES_COMPRESS_SETTING.get(environment.settings()); } @Override @@ -187,4 +189,12 @@ protected ByteSizeValue chunkSize() { public BlobPath basePath() { return basePath; } + + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(LOCATION_SETTING); + return restrictedSettings; + } } diff --git a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java new file mode 100644 index 0000000000000..c06c805a39396 --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.fs; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; + +/** + * Extension of {@link FsRepository} that can be reloaded inplace + * + * @opensearch.internal + */ +public class ReloadableFsRepository extends FsRepository { + /** + * Constructs a shared file system repository that is reloadable in-place. + */ + public ReloadableFsRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + } + + @Override + public boolean isReloadable() { + return true; + } + + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + if (isReloadable() == false) { + return; + } + + super.reload(repositoryMetadata); + validateLocation(); + readMetadata(); + } +} diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index ac30f999d0da7..cc48b59699a17 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -524,7 +524,7 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel /** * Attempts to extract auth token and login. * - * @returns false if there was an error and the request should not continue being dispatched + * @return false if there was an error and the request should not continue being dispatched * */ private boolean handleAuthenticateUser(final RestRequest request, final RestChannel channel) { try { diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 7832649e8ad32..294dc3ffbe329 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -108,7 +108,7 @@ default List replacedRoutes() { } /** - * Controls whether requests handled by this class are allowed to to access system indices by default. + * Controls whether requests handled by this class are allowed to access system indices by default. * @return {@code true} if requests handled by this class should be allowed to access system indices. */ default boolean allowSystemIndexAccessByDefault() { diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index ebfd082d974fd..080366e536da1 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -180,6 +180,12 @@ public static void parseSearchRequest( searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); } + if (request.hasParam("phase_took")) { + // only set if we have the parameter passed to override the cluster-level default + // else phaseTook = null + searchRequest.setPhaseTook(request.paramAsBoolean("phase_took", true)); + } + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types // from the REST layer. 
these modes are an internal optimization and should // not be specified explicitly by the user. diff --git a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java index 378ff1ccfe4a8..2396aa369e98d 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java @@ -343,11 +343,14 @@ public double decayNumericGauss(double docValue) { /** * Limitations: since script functions don't have access to DateFieldMapper, * decay functions on dates are limited to dates in the default format and default time zone, + * Further, since script module gets initialized before the featureflags are loaded, + * we cannot use the feature flag to gate the usage of the new default date format. * Also, using calculations with now are not allowed. * */ private static final ZoneId defaultZoneId = ZoneId.of("UTC"); - private static final DateMathParser dateParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); + // ToDo: use new default date formatter once feature flag is removed + private static final DateMathParser dateParser = DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); /** * Linear date decay diff --git a/server/src/main/java/org/opensearch/script/Script.java b/server/src/main/java/org/opensearch/script/Script.java index a611e71c3bf3f..9e74314c281cd 100644 --- a/server/src/main/java/org/opensearch/script/Script.java +++ b/server/src/main/java/org/opensearch/script/Script.java @@ -70,9 +70,9 @@ * {@link Script} represents used-defined input that can be used to * compile and execute a script from the {@link ScriptService} * based on the {@link ScriptType}. - * + *

                          * There are three types of scripts specified by {@link ScriptType}. - * + *

                          * The following describes the expected parameters for each type of script: * *

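A hypothetical, illustrative sketch (not part of this changeset) of the two forms the Script javadoc in the surrounding hunks describes: the simple inline form and the fully-specified form. The constructor shapes shown here are assumptions based on how the class is referenced in this diff; verify against org.opensearch.script.Script itself.

import java.util.Map;

import org.opensearch.script.Script;
import org.opensearch.script.ScriptType;

public class ScriptFormsExample {
    public static void main(String[] args) {
        // Simple form: only the source is given; type, lang and params fall back to defaults.
        Script simple = new Script("return Math.log(doc.popularity) * 100;");

        // Fully-specified form: type, lang, source and user-defined params are given explicitly.
        Script complex = new Script(
            ScriptType.INLINE,
            "painless",
            "doc.popularity.value * params.factor",
            Map.<String, Object>of("factor", 2)
        );

        System.out.println(simple.getIdOrCode());
        System.out.println(complex.getLang() + " -> " + complex.getParams());
    }
}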
                            @@ -348,16 +348,16 @@ public static Script parse(Settings settings) { /** * This will parse XContent into a {@link Script}. The following formats can be parsed: - * + *

                            * The simple format defaults to an {@link ScriptType#INLINE} with no compiler options or user-defined params: - * + *

                            * Example: * {@code * "return Math.log(doc.popularity) * 100;" * } * * The complex format where {@link ScriptType} and idOrCode are required while lang, options and params are not required. - * + *

                            * {@code * { * // Exactly one of "id" or "source" must be specified @@ -391,7 +391,7 @@ public static Script parse(Settings settings) { * * This also handles templates in a special way. If a complexly formatted query is specified as another complex * JSON object the query is assumed to be a template, and the format will be preserved. - * + *

                            * {@code * { * "source" : { "query" : ... }, @@ -605,7 +605,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will build scripts into the following XContent structure: - * + *

                            * {@code * { * "<(id, source)>" : "", @@ -635,10 +635,10 @@ public void writeTo(StreamOutput out) throws IOException { * } * * Note that lang, options, and params will only be included if there have been any specified. - * + *

                            * This also handles templates in a special way. If the {@link Script#CONTENT_TYPE_OPTION} option * is provided and the {@link ScriptType#INLINE} is specified then the template will be preserved as a raw field. - * + *

                            * {@code * { * "source" : { "query" : ... }, diff --git a/server/src/main/java/org/opensearch/script/ScriptCache.java b/server/src/main/java/org/opensearch/script/ScriptCache.java index 3c50fc12dcacb..fb57e7cdfa5bd 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCache.java +++ b/server/src/main/java/org/opensearch/script/ScriptCache.java @@ -158,7 +158,7 @@ public ScriptContextStats stats(String context) { /** * Check whether there have been too many compilations within the last minute, throwing a circuit breaking exception if so. * This is a variant of the token bucket algorithm: https://en.wikipedia.org/wiki/Token_bucket - * + *

                            * It can be thought of as a bucket with water, every time the bucket is checked, water is added proportional to the amount of time that * elapsed since the last time it was checked. If there is enough water, some is removed and the request is allowed. If there is not * enough water the request is denied. Just like a normal bucket, if water is added that overflows the bucket, the extra water/capacity diff --git a/server/src/main/java/org/opensearch/script/ScriptContext.java b/server/src/main/java/org/opensearch/script/ScriptContext.java index 71ced303b062e..2180b6059dbef 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContext.java +++ b/server/src/main/java/org/opensearch/script/ScriptContext.java @@ -40,7 +40,7 @@ /** * The information necessary to compile and run a script. - * + *

                            * A {@link ScriptContext} contains the information related to a single use case and the interfaces * and methods necessary for a {@link ScriptEngine} to implement. *

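The ScriptCache hunk above describes compilation rate limiting as a variant of the token bucket algorithm (water is added in proportion to elapsed time; a request is allowed only if enough water can be removed). The following standalone sketch is a hypothetical illustration of that general technique, not OpenSearch's actual implementation; the class name and limits are invented for the example.

public class CompilationRateLimiter {
    private final double capacity;      // maximum amount of "water" the bucket can hold
    private final double refillPerNano; // water added per elapsed nanosecond
    private double water;               // current fill level
    private long lastChecked;           // nanoTime of the previous check

    public CompilationRateLimiter(int maxCompilations, long perNanos) {
        this.capacity = maxCompilations;
        this.refillPerNano = (double) maxCompilations / perNanos;
        this.water = capacity;
        this.lastChecked = System.nanoTime();
    }

    /** Returns true if one more compilation is allowed, false if the rate limit would be exceeded. */
    public synchronized boolean tryCompile() {
        long now = System.nanoTime();
        // add water proportional to the time elapsed since the last check, capped at capacity
        water = Math.min(capacity, water + (now - lastChecked) * refillPerNano);
        lastChecked = now;
        if (water >= 1.0) {
            water -= 1.0; // enough water: remove one unit and allow the request
            return true;
        }
        return false;     // not enough water: deny the request
    }

    public static void main(String[] args) {
        // roughly 75 compilations per 5 minutes (5 min = 300_000_000_000 ns)
        CompilationRateLimiter limiter = new CompilationRateLimiter(75, 300_000_000_000L);
        System.out.println(limiter.tryCompile());
    }
}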
                            diff --git a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java index ef64959f99a3c..30756ff702e8f 100644 --- a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java @@ -56,11 +56,11 @@ /** * The allowable types, languages and their corresponding contexts. When serialized there is a top level types_allowed list, * meant to reflect the setting script.allowed_types with the allowed types (eg inline, stored). - * + *

                            * The top-level language_contexts list of objects have the language (eg. painless, * mustache) and a list of contexts available for the language. It is the responsibility of the caller to ensure * these contexts are filtered by the script.allowed_contexts setting. - * + *

                            * The json serialization of the object has the form: * * { diff --git a/server/src/main/java/org/opensearch/script/ScriptMetadata.java b/server/src/main/java/org/opensearch/script/ScriptMetadata.java index 5f529fccd213c..fd92d8f7f02db 100644 --- a/server/src/main/java/org/opensearch/script/ScriptMetadata.java +++ b/server/src/main/java/org/opensearch/script/ScriptMetadata.java @@ -183,9 +183,9 @@ static ScriptMetadata deleteStoredScript(ScriptMetadata previous, String id) { /** * This will parse XContent into {@link ScriptMetadata}. - * + *

                            * The following format will be parsed: - * + *

                            * {@code * { * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", @@ -356,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from {@link ScriptMetadata}. The following format will be written: - * + *

                            * {@code * { * "" : "<{@link StoredScriptSource#toXContent(XContentBuilder, Params)}>", diff --git a/server/src/main/java/org/opensearch/script/StoredScriptSource.java b/server/src/main/java/org/opensearch/script/StoredScriptSource.java index 0ff44b3af890a..d1dae67d0e55f 100644 --- a/server/src/main/java/org/opensearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/opensearch/script/StoredScriptSource.java @@ -308,7 +308,7 @@ public static StoredScriptSource parse(BytesReference content, MediaType mediaTy /** * This will parse XContent into a {@link StoredScriptSource}. The following format is what will be parsed: - * + *

                            * {@code * { * "script" : { @@ -387,7 +387,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from a {@link StoredScriptSource}. The following format will be written: - * + *

                            * {@code * { * "script" : { diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 28931bb5a860f..960b46d68977b 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -896,6 +896,8 @@ public void evaluateRequestShouldUseConcurrentSearch() { && aggregations().factories() != null && !aggregations().factories().allFactoriesSupportConcurrentSearch()) { requestShouldUseConcurrentSearch.set(false); + } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) { + requestShouldUseConcurrentSearch.set(false); } else { requestShouldUseConcurrentSearch.set(true); } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index c6187e5949035..7be51643eeb7d 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -243,7 +243,12 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu } public DateTime(StreamInput in) throws IOException { - this.formatter = DateFormatter.forPattern(in.readString()); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.formatter = DateFormatter.forPattern(in.readString(), in.readOptionalString()); + } else { + this.formatter = DateFormatter.forPattern(in.readString()); + } + this.parser = formatter.toDateMathParser(); String zoneId = in.readString(); this.timeZone = ZoneId.of(zoneId); @@ -260,7 +265,14 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(formatter.pattern()); + if (out.getVersion().before(Version.V_2_12_0) && formatter.equals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER)) { + out.writeString(DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.pattern()); // required for backwards compatibility + } else { + out.writeString(formatter.pattern()); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalString(formatter.printPattern()); + } out.writeString(timeZone.getId()); out.writeVInt(resolution.ordinal()); if (out.getVersion().before(Version.V_3_0_0)) { diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index 0fbd41f062710..d812f1290efe3 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -542,7 +542,7 @@ public static MultiValueMode fromString(String sortMode) { * Return a {@link NumericDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *

                            * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDocValues select(final SortedNumericDocValues values) { @@ -583,12 +583,12 @@ protected long pick(SortedNumericDocValues values) throws IOException { /** * Return a {@link NumericDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *

                            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *

                            * Allowed Modes: SUM, AVG, MIN, MAX - * + *

                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -658,7 +658,7 @@ protected long pick( * Return a {@link NumericDoubleValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *

                            * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDoubleValues select(final SortedNumericDoubleValues values) { @@ -694,12 +694,12 @@ protected double pick(SortedNumericDoubleValues values) throws IOException { /** * Return a {@link NumericDoubleValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *

                            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *

                            * Allowed Modes: SUM, AVG, MIN, MAX - * + *

                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -761,7 +761,7 @@ protected double pick( * Return a {@link BinaryDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *

                            * Allowed Modes: MIN, MAX */ public BinaryDocValues select(final SortedBinaryDocValues values, final BytesRef missingValue) { @@ -816,12 +816,12 @@ protected BytesRef pick(SortedBinaryDocValues values) throws IOException { /** * Return a {@link BinaryDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *

                            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *

                            * Allowed Modes: MIN, MAX - * + *

                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -889,7 +889,7 @@ protected BytesRef pick( /** * Return a {@link SortedDocValues} instance that can be used to sort documents * with this mode and the provided values. - * + *

                            * Allowed Modes: MIN, MAX */ public SortedDocValues select(final SortedSetDocValues values) { @@ -949,11 +949,11 @@ protected int pick(SortedSetDocValues values) throws IOException { /** * Return a {@link SortedDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *

                            * For every root document, the values of its inner documents will be aggregated. - * + *

                            * Allowed Modes: MIN, MAX - * + *

                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ diff --git a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java index 4d86c6c2e2277..1df58666f6fdb 100644 --- a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java @@ -47,10 +47,10 @@ * Any state needs to be serialized as part of the {@link Writeable#writeTo(StreamOutput)} method and * read from the incoming stream, usually done adding a constructor that takes {@link StreamInput} as * an argument. - * + *

                            * Registration happens through {@link SearchPlugin#getSearchExts()}, which also needs a {@link CheckedFunction} that's able to parse * the incoming request from the REST layer into the proper {@link SearchExtBuilder} subclass. - * + *

                            * {@link #getWriteableName()} must return the same name as the one used for the registration * of the {@link SearchExtSpec}. * diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 49f1cc585fd80..5649ec383a9e9 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -1035,7 +1035,7 @@ public int getOffset() { /** * Returns the next child nested level if there is any, otherwise null is returned. - * + *

                            * In the case of mappings with multiple levels of nested object fields */ public NestedIdentity getChild() { diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 0f1030b87c036..62d397de58187 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -327,7 +327,7 @@ public class SearchModule { /** * Constructs a new SearchModule object - * + *

                            * NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided. * When constructed, a static flag is set in Lucene {@link BooleanQuery#setMaxClauseCount} according to the settings. * @param settings Current settings diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java index 392c65ce27aea..47e9def094623 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java @@ -135,7 +135,7 @@ public ScoreMode scoreMode() { * Returns a converter for point values if it's safe to use the indexed data instead of * doc values. Generally, this means that the query has no filters or scripts, the aggregation is * top level, and the underlying field is indexed, and the index is sorted in the right order. - * + *

                            * If those conditions aren't met, return null to indicate a point reader cannot * be used in this case. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java index e4d64e1e8517c..288c7a64ba795 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java @@ -125,7 +125,7 @@ public double sortValue(AggregationPath.PathElement head, Iterator * This method first reduces the aggregations, and if it is the final reduce, then reduce the pipeline * aggregations (both embedded parent/sibling as well as top-level sibling pipelines) */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index 67af0b13eed3b..eef427754f535 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -132,7 +132,7 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do /** * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(long[])} to merge the actual * ordinals and doc ID deltas. - * + *

                            * Refer to that method for documentation about the merge map. * * @deprecated use {@link mergeBuckets(long, LongUnaryOperator)} @@ -146,7 +146,7 @@ public final void mergeBuckets(long[] mergeMap, long newNumBuckets) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + *

                            * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(LongUnaryOperator)} to * merge the actual ordinals and doc ID deltas. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java index de74055bb94f3..dfb8f6be7155d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java @@ -101,7 +101,7 @@ private GeoTileUtils() {} /** * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string. - * + *

                            * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive). * * @param parser {@link XContentParser} to parse the value from diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index 8d35c1edc8cb0..3e9424eda92a9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -55,16 +55,17 @@ public MergingBucketsDeferringCollector(SearchContext context, boolean isGlobal) /** * Merges/prunes the existing bucket ordinals and docDeltas according to the provided mergeMap. - * + *

                            * The mergeMap is an array where the index position represents the current bucket ordinal, and * the value at that position represents the ordinal the bucket should be merged with. If * the value is set to -1 it is removed entirely. - * + *

                            * For example, if the mergeMap [1,1,3,-1,3] is provided: - * - Buckets `0` and `1` will be merged to bucket ordinal `1` - * - Bucket `2` and `4` will be merged to ordinal `3` - * - Bucket `3` will be removed entirely - * + *

+ * <ul>
+ * <li> Buckets `0` and `1` will be merged to bucket ordinal `1`</li>
+ * <li> Bucket `2` and `4` will be merged to ordinal `3`</li>
+ * <li> Bucket `3` will be removed entirely</li>
+ * </ul>
                            * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. * @@ -80,7 +81,7 @@ public void mergeBuckets(long[] mergeMap) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + *

                            * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 366de8619fd55..fd94ba355238a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -216,7 +216,7 @@ public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInter /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *

                            * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -229,7 +229,7 @@ public DateHistogramValuesSourceBuilder calendarInterval(DateHistogramInterval i /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *

                            * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 27619044d8995..9f8a4cff5f3fc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -142,10 +142,10 @@ public String getWriteableName() { @Override public InternalComposite create(List newBuckets) { - /** - * This is used by pipeline aggregations to filter/remove buckets so we - * keep the afterKey of the original aggregation in order - * to be able to retrieve the next page even if all buckets have been filtered. + /* + This is used by pipeline aggregations to filter/remove buckets so we + keep the afterKey of the original aggregation in order + to be able to retrieve the next page even if all buckets have been filtered. */ return new InternalComposite( name, @@ -473,8 +473,8 @@ public int compareKey(InternalBucket other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java index 8382b191025fe..2a1544e218f2c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -70,9 +70,9 @@ public static ParsedComposite fromXContent(XContentParser parser, String name) t ParsedComposite aggregation = PARSER.parse(parser, null); aggregation.setName(name); if (aggregation.afterKey == null && aggregation.getBuckets().size() > 0) { - /** - * Previous versions (< 6.3) don't send afterKey - * in the response so we set it as the last returned buckets. + /* + Previous versions (< 6.3) don't send afterKey + in the response so we set it as the last returned buckets. 
*/ aggregation.setAfterKey(aggregation.getBuckets().get(aggregation.getBuckets().size() - 1).key); } @@ -130,8 +130,8 @@ void setKey(Map key) { @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java index bd0a4f13ddf08..9442529bf9342 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -62,7 +62,7 @@ abstract class SortedDocsProducer { * Visits all non-deleted documents in iterator and fills the provided queue * with the top composite buckets extracted from the collection. * Documents that contain a top composite bucket are added in the provided builder if it is not null. - * + *

                            * Returns true if the queue is full and the current leadSourceBucket did not produce any competitive * composite buckets. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 55c841f5b9c04..a0a636c121e12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -75,7 +75,7 @@ public FilterAggregatorFactory( * necessary. This is done lazily so that the {@link Weight} is only created * if the aggregation collects documents reducing the overhead of the * aggregation in the case where no documents are collected. - * + *

                            * Note that as aggregations are initialsed and executed in a serial manner, * no concurrency considerations are necessary here. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index 35d968b789a21..a8e157a1cbb79 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -92,7 +92,7 @@ public FiltersAggregatorFactory( * necessary. This is done lazily so that the {@link Weight}s are only * created if the aggregation collects documents reducing the overhead of * the aggregation in the case where no documents are collected. - * + *

                            * Note: With concurrent segment search use case, multiple aggregation collectors executing * on different threads will try to fetch the weights. To handle the race condition there is * a synchronization block diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index d7f89225524c0..b4f1e78f77aaf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -546,7 +546,7 @@ private int collectValue(long owningBucketOrd, int roundingIdx, int doc, long ro /** * Increase the rounding of {@code owningBucketOrd} using - * estimated, bucket counts, {@link #rebucket() rebucketing} the all + * estimated, bucket counts, {@link FromMany#rebucket()} rebucketing} the all * buckets if the estimated number of wasted buckets is too high. */ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucketCount, long newKey, int oldRounding) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 181fb468f3356..a978b5cfa3e3e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -245,7 +245,7 @@ public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterv /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *

                            * This is mutually exclusive with {@link DateHistogramAggregationBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -258,7 +258,7 @@ public DateHistogramAggregationBuilder calendarInterval(DateHistogramInterval in /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *

                            * This is mutually exclusive with {@link DateHistogramAggregationBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8233c3d995dda..f602eea7a9b12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -216,9 +216,7 @@ public void collectDebugInfo(BiConsumer add) { } /** - * Returns the size of the bucket in specified units. - * - * If unitSize is null, returns 1.0 + * @return the size of the bucket in specified units, or 1.0 if unitSize is null */ @Override public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 8f8e71f3cd685..9f907bcacadf9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -129,7 +129,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Converts this DateHistogramInterval into a millisecond representation. If this is a calendar * interval, it is an approximation of milliseconds based on the fixed equivalent (e.g. `1h` is treated as 60 * fixed minutes, rather than the hour at a specific point in time. - * + *

                            * This is merely a convenience helper for quick comparisons and should not be used for situations that * require precise durations. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java index a2c63cf25c0c2..1d21152b6f622 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java @@ -34,7 +34,7 @@ /** * A shared interface for aggregations that parse and use "interval" parameters. - * + *

                            * Provides definitions for the new fixed and calendar intervals, and deprecated * defintions for the old interval/dateHisto interval parameters * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 2f8d512b7511f..83c20ba1c1d04 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -53,13 +53,13 @@ /** * A class that handles all the parsing, bwc and deprecations surrounding date histogram intervals. - * + *

                            * - Provides parser helpers for the deprecated interval/dateHistogramInterval parameters. * - Provides parser helpers for the new calendar/fixed interval parameters * - Can read old intervals from a stream and convert to new intervals * - Can write new intervals to old format when streaming out * - Provides a variety of helper methods to interpret the intervals as different types, depending on caller's need - * + *

                            * After the deprecated parameters are removed, this class can be simplified greatly. The legacy options * will be removed, and the mutual-exclusion checks can be done in the setters directly removing the need * for the enum and the complicated "state machine" logic @@ -220,7 +220,7 @@ public DateHistogramInterval getAsCalendarInterval() { /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *

                            * This is mutually exclusive with {@link DateIntervalWrapper#fixedInterval(DateHistogramInterval)} * * @param interval The fixed interval to use @@ -250,7 +250,7 @@ public DateHistogramInterval getAsFixedInterval() { /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *

                            * This is mutually exclusive with {@link DateIntervalWrapper#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java index 69c70ed1bf7fd..235ae8fcee6d7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java @@ -49,7 +49,7 @@ /** * Represent hard_bounds and extended_bounds in histogram aggregations. - * + *

                            * This class is similar to {@link LongBounds} used in date histograms, but is using longs to store data. LongBounds and DoubleBounds are * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 52f689eb7c229..8ebd67bc1ebe5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -491,7 +491,7 @@ private void mergeBucketsWithPlan(List buckets, List plan, * Makes a merge plan by simulating the merging of the two closest buckets, until the target number of buckets is reached. * Distance is determined by centroid comparison. * Then, this plan is actually executed and the underlying buckets are merged. - * + *

                            * Requires: buckets is sorted by centroid. */ private void mergeBucketsIfNeeded(List buckets, int targetNumBuckets, ReduceContext reduceContext) { @@ -567,7 +567,7 @@ private void mergeBucketsWithSameMin(List buckets, ReduceContext reduceC /** * When two adjacent buckets A, B overlap (A.max > B.min) then their boundary is set to * the midpoint: (A.max + B.min) / 2. - * + *

                            * After this adjustment, A will contain more values than indicated and B will have less. */ private void adjustBoundsForOverlappingBuckets(List buckets, ReduceContext reduceContext) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java index e138824e7fce8..ad6572916c84a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java @@ -54,7 +54,7 @@ /** * Represent hard_bounds and extended_bounds in date-histogram aggregations. - * + *

                            * This class is similar to {@link DoubleBounds} used in histograms, but is using longs to store data. LongBounds and DoubleBounds are * * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 11ff7dbc407cb..526945243c786 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -76,7 +76,7 @@ public class VariableWidthHistogramAggregator extends DeferableBucketAggregator /** * This aggregator goes through multiple phases of collection. Each phase has a different CollectionPhase::collectValue * implementation - * + *

                            * Running a clustering algorithm like K-Means is unfeasible because large indices don't fit into memory. * But having multiple collection phases lets us accurately bucket the docs in one pass. */ @@ -231,7 +231,7 @@ protected void swap(int i, int j) { * Produces a merge map where `mergeMap[i]` represents the index that values[i] * would be moved to if values were sorted * In other words, this method produces a merge map that will sort values - * + *

                            * See BucketsAggregator::mergeBuckets to learn more about the merge map */ public long[] generateMergeMap() { @@ -242,10 +242,10 @@ public long[] generateMergeMap() { /** * Sorting the documents by key lets us bucket the documents into groups with a single linear scan - * + *

                            * But we can't do this by just sorting buffer, because we also need to generate a merge map * for every change we make to the list, so that we can apply the changes to the underlying buckets as well. - * + *

                            * By just creating a merge map, we eliminate the need to actually sort buffer. We can just * use the merge map to find any doc's sorted index. */ @@ -347,7 +347,7 @@ private void createAndAppendNewCluster(double value) { /** * Move the last cluster to position idx * This is expensive because a merge map of size numClusters is created, so don't call this method too often - * + *

                            * TODO: Make this more efficient */ private void moveLastCluster(int index) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 23e2bc6b3e54b..a886bdb3ae188 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -55,7 +55,7 @@ /** * Aggregate on only the top-scoring docs on a shard. - * + *

                            * TODO currently the diversity feature of this agg offers only 'script' and * 'field' as a means of generating a de-dup value. In future it would be nice * if users could use any of the "bucket" aggs syntax (geo, date histogram...) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java index 4d1cbd4ce72f1..5b90163fa3959 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java @@ -407,8 +407,8 @@ public int hashCode() { /** * Copy from InternalComposite - * - * Format obj using the provided {@link DocValueFormat}. + *

                            + * Format {@code obj} using the provided {@link DocValueFormat}. * If the format is equals to {@link DocValueFormat#RAW}, the object is returned as is * for numbers and a string for {@link BytesRef}s. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java index 341ac021f6e0c..0e773291881cf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java @@ -436,11 +436,11 @@ public InternalAggregation reduce(List aggregations, Reduce } final List reducedBuckets; - /** - * Buckets returned by a partial reduce or a shard response are sorted by key. - * That allows to perform a merge sort when reducing multiple aggregations together. - * For backward compatibility, we disable the merge sort and use ({@link InternalTerms#reduceLegacy} if any of - * the provided aggregations use a different {@link InternalTerms#reduceOrder}. + /* + Buckets returned by a partial reduce or a shard response are sorted by key. + That allows to perform a merge sort when reducing multiple aggregations together. + For backward compatibility, we disable the merge sort and use ({@link InternalTerms#reduceLegacy} if any of + the provided aggregations use a different {@link InternalTerms#reduceOrder}. */ BucketOrder thisReduceOrder = getReduceOrder(aggregations); if (isKeyOrder(thisReduceOrder)) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java index 0482ef823818c..59f48bd7fbaba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -94,10 +94,10 @@ public MultiTermsAggregator( this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); // Todo, copy from TermsAggregator. need to remove duplicate code. if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. */ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index dc616ca7512be..5d83d926ab36f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -196,7 +196,7 @@ public double getPrecision() { * Set's the false-positive rate for individual cuckoo filters. 
Does not dictate the overall fpp rate * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall * error rate grows differently than individual filters - * + *

                            * This value does, however, affect the overall space usage of the filter. Coarser precisions provide * more compact filters. The default is 0.01 */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java index 7cacf1e918380..845149d894aad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -244,10 +244,10 @@ public TermsAggregator( partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); this.format = format; if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. */ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java index 6b998fc86361d..902e4d69ed5fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java @@ -34,9 +34,9 @@ /** * Hyperloglog counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * 40671.pdf and its + * appendix + *

                            * Trying to understand what this class does without having read the paper is considered adventurous. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java index 4354a23b70f6b..e74179b403e8e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java @@ -46,7 +46,6 @@ /** * Base class for HLL++ algorithms. - * * It contains methods for cloning and serializing the data structure. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java index 3f5f524c9c2f5..7c00b25ae365f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java @@ -36,11 +36,11 @@ /** * Linear counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * 40671.pdf and its + * appendix + *

                            * Trying to understand what this class does without having read the paper is considered adventurous. - * + *

                            * The algorithm just keep a record of all distinct values provided encoded as an integer. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java index 762b10fc28fa7..fa8830de5dab9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java @@ -73,7 +73,7 @@ public static > ConstructingO ParseField valuesField ) { - /** + /* * This is a non-ideal ConstructingObjectParser, because it is a compromise between Percentiles and Ranks. * Ranks requires an array of values because there is no sane default, and we want to keep that in the ctor. * Percentiles has defaults, which means the API allows the user to either use the default or configure @@ -86,6 +86,7 @@ public static > ConstructingO * out the behavior from there * * `args` are provided from the ConstructingObjectParser in-order they are defined in the parser. So: + * * - args[0]: values * - args[1]: tdigest config options * - args[2]: hdr config options @@ -197,7 +198,7 @@ public boolean keyed() { /** * Expert: set the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + *

                            * Deprecated: set numberOfSignificantValueDigits by configuring a {@link PercentilesConfig.Hdr} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -217,7 +218,7 @@ public T numberOfSignificantValueDigits(int numberOfSignificantValueDigits) { /** * Expert: get the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + *

                            * Deprecated: get numberOfSignificantValueDigits by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -232,7 +233,7 @@ public int numberOfSignificantValueDigits() { /** * Expert: set the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + *

                            * Deprecated: set compression by configuring a {@link PercentilesConfig.TDigest} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -249,7 +250,7 @@ public T compression(double compression) { /** * Expert: get the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + *

                            * Deprecated: get compression by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -317,15 +318,15 @@ public T percentilesConfig(PercentilesConfig percentilesConfig) { /** * Return the current algo configuration, or a default (Tdigest) otherwise - * + *

                            * This is needed because builders don't have a "build" or "finalize" method, but * the old API did bake in defaults. Certain operations like xcontent, equals, hashcode * will use the values in the builder at any time and need to be aware of defaults. - * + *

                            * But to maintain BWC behavior as much as possible, we allow the user to set * algo settings independent of method. To keep life simple we use a null to track * if any method has been selected yet. - * + *

                            * However, this means we need a way to fetch the default if the user hasn't * selected any method and uses a builder-side feature like xcontent */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 7bd1e1aa22a90..7ab35eaed785c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -49,16 +49,16 @@ * Hyperloglog++ counter, implemented based on pseudo code from * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + *

                            * This implementation is different from the original implementation in that it uses a hash table instead of a sorted list for linear * counting. Although this requires more space and makes hyperloglog (which is less accurate) used sooner, this is also considerably faster. - * + *

                            * Trying to understand what this class does without having read the paper is considered adventurous. - * + *

                            * The HyperLogLogPlusPlus contains two algorithms, one for linear counting and the HyperLogLog algorithm. Initially hashes added to the * data structure are processed using the linear counting until a threshold defined by the precision is reached where the data is replayed * to the HyperLogLog algorithm and then this is used. - * + *

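A minimal sketch of how a caller (such as the cardinality aggregator) drives the structure described above. The constructor and method shapes here are assumptions based on typical usage of this class, not something introduced by this diff:

    import org.opensearch.common.util.BigArrays;
    import org.opensearch.search.aggregations.metrics.HyperLogLogPlusPlus;

    // Precision 14 is the usual default; higher precision delays the switch from linear counting to HLL
    HyperLogLogPlusPlus counts = new HyperLogLogPlusPlus(14, BigArrays.NON_RECYCLING_INSTANCE, 1);
    long bucketOrd = 0;
    long[] hashes = new long[] { 0x1234L, 0x5678L, 0x9abcL };   // hashes of the field values (illustrative)
    for (long hash : hashes) {
        counts.collect(bucketOrd, hash);   // linear counting first, replayed into HLL once the threshold is crossed
    }
    long estimate = counts.cardinality(bucketOrd);
    counts.close();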
                            * It supports storing several HyperLogLogPlusPlus structures which are identified by a bucket number. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java index 252df7358ddcd..558e9df93c804 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java @@ -40,7 +40,7 @@ /** * AbstractHyperLogLogPlusPlus instance that only supports linear counting. The maximum number of hashes supported * by the structure is determined at construction time. - * + *

                            * This structure expects all the added values to be distinct and therefore there are no checks * if an element has been previously added. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index be98df384fc28..6f9be06231819 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -52,7 +52,7 @@ /** * A field data based aggregator that counts the number of values a specific field has within the aggregation context. - * + *

                            * This aggregator works in a multi-bucket mode, that is, when serves as a sub-aggregator, a single aggregator instance aggregates the * counts for all buckets owned by the parent aggregator) * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java index 3015e7ce9f364..c7f2a29793bff 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java @@ -64,7 +64,7 @@ public class BucketHelpers { * a date_histogram might have empty buckets due to no data existing for that time interval. * This can cause problems for operations like a derivative, which relies on a continuous * function. - * + *

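The two gap policies enumerated just below are usually chosen on the pipeline builder. A hedged sketch, with builder and enum names taken from the pipeline aggregation API rather than from this hunk:

    import org.opensearch.search.aggregations.PipelineAggregatorBuilders;
    import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
    import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder;

    // Derivative of "the_sum" across date_histogram buckets, filling empty buckets with zero
    DerivativePipelineAggregationBuilder deriv = PipelineAggregatorBuilders.derivative("sales_deriv", "the_sum")
        .gapPolicy(GapPolicy.INSERT_ZEROS);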
                            * "insert_zeros": empty buckets will be filled with zeros for all metrics * "skip": empty buckets will simply be ignored * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index a4c3c14f3365f..3e97eda693b91 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -52,18 +52,18 @@ /** * This pipeline aggregation gives the user the ability to script functions that "move" across a window * of data, instead of single data points. It is the scripted version of MovingAvg pipeline agg. - * + *

 * Through custom script contexts, we expose a number of convenience methods:
- *
- * - max
- * - min
- * - sum
- * - unweightedAvg
- * - linearWeightedAvg
- * - ewma
- * - holt
- * - holtWintersMovAvg
- *
+ * <ul>
+ * <li>max</li>
+ * <li>min</li>
+ * <li>sum</li>
+ * <li>unweightedAvg</li>
+ * <li>linearWeightedAvg</li>
+ * <li>ewma</li>
+ * <li>holt</li>
+ * <li>holtWintersMovAvg</li>
+ * </ul>
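In practice these helpers are invoked from the {@code moving_fn} script context; the script source below is only an illustrative example:

    // Script body for a moving_fn pipeline aggregation; "values" is the window exposed by the script context
    String scriptSource = "MovingFunctions.unweightedAvg(values)";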
                            * The user can also define any arbitrary logic via their own scripting, or combine with the above methods. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java index bac486576f537..051b9c43f63f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java @@ -75,7 +75,7 @@ public static double sum(double[] values) { /** * Calculate a simple unweighted (arithmetic) moving average. - * + *

                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -94,7 +94,7 @@ public static double unweightedAvg(double[] values) { /** * Calculate a standard deviation over the values using the provided average. - * + *

                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -118,7 +118,7 @@ public static double stdDev(double[] values, double avg) { /** * Calculate a linearly weighted moving average, such that older values are * linearly less important. "Time" is determined by position in collection - * + *

                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -141,11 +141,11 @@ public static double linearWeightedAvg(double[] values) { /** * * Calculate a exponentially weighted moving average. - * + *

                            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *

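The smoothing behaviour described above can be exercised directly against the static helpers in this file, using the signatures shown in the surrounding hunks; the sample values are illustrative:

    import org.opensearch.search.aggregations.pipeline.MovingFunctions;

    double[] window = new double[] { 10.0, 12.0, Double.NaN, 14.0 };   // NaN entries are ignored
    double smoothed = MovingFunctions.ewma(window, 0.5);               // alpha = 0.5, the documented default
    double plain = MovingFunctions.unweightedAvg(window);              // arithmetic mean of the finite values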
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -171,13 +171,13 @@ public static double ewma(double[] values, double alpha) { /** * Calculate a doubly exponential weighted moving average - * + *

                            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *

                            * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data - * + *

                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -241,14 +241,14 @@ public static double[] holtForecast(double[] values, double alpha, double beta, /** * Calculate a triple exponential weighted moving average - * + *

                            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *

                            * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data. * Gamma is equivalent to alpha, but controls the smoothing of the seasonality instead of the data - * + *

                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java index a61a866228161..8427346357b0e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java @@ -36,11 +36,11 @@ /** * A cost minimizer which will fit a MovAvgModel to the data. - * + *

                            * This optimizer uses naive simulated annealing. Random solutions in the problem space * are generated, compared against the last period of data, and the least absolute deviation * is recorded as a cost. - * + *

                            * If the new cost is better than the old cost, the new coefficients are chosen. If the new * solution is worse, there is a temperature-dependent probability it will be randomly selected * anyway. This allows the algo to sample the problem space widely. As iterations progress, @@ -114,7 +114,7 @@ private static double acceptanceProbability(double oldCost, double newCost, doub /** * Calculates the "cost" of a model. E.g. when run on the training data, how closely do the predictions * match the test data - * + *

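A generic sketch of the two pieces described here, the temperature-dependent acceptance rule and the least-absolute-deviation cost. The shapes are illustrative and not copied from this class, although the acceptanceProbability signature matches the hunk above:

    // Metropolis-style acceptance: always take improvements, sometimes take regressions while the temperature is high
    static double acceptanceProbability(double oldCost, double newCost, double temperature) {
        return newCost < oldCost ? 1.0 : Math.exp((oldCost - newCost) / temperature);
    }

    // Least absolute deviation between the held-out data and the model's forecast
    static double cost(double[] actual, double[] forecast) {
        double err = 0d;
        for (int i = 0; i < actual.length; i++) {
            err += Math.abs(actual[i] - forecast[i]);
        }
        return err;
    }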
                            * Uses Least Absolute Differences to calculate error. Note that this is not scale free, but seems * to work fairly well in practice * diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index 25cdd76183602..a7de47bed2e6e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -78,9 +78,9 @@ * Provides a set of static helpers to determine if a particular type of InternalAggregation "has a value" * or not. This can be difficult to determine from an external perspective because each agg uses * different internal bookkeeping to determine if it is empty or not (NaN, +/-Inf, 0.0, etc). - * + *

                            * This set of helpers aim to ease that task by codifying what "empty" is for each agg. - * + *

                            * It is not entirely accurate for all aggs, since some do not expose or track the needed state * (e.g. sum doesn't record count, so it's not clear if the sum is 0.0 because it is empty * or because of summing to zero). Pipeline aggs in particular are not well supported diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java index 224f9281705e1..f6d6fe28a56d3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java @@ -274,7 +274,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, * MappedFieldType, it prefers to get the formatter from there. Only when a field can't be * resolved (which is to say script cases and unmapped field cases), it will fall back to calling this method on whatever * ValuesSourceType it was able to resolve to. - * + *

                            * For geoshape field we may never hit this function till we have aggregations which are only geo_shape * specific and not present on geo_points, as we use default CoreValueSource types for Geo based aggregations * as GEOPOINT @@ -411,7 +411,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, @Override public DocValueFormat getFormatter(String format, ZoneId tz) { return new DocValueFormat.DateTime( - format == null ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER : DateFormatter.forPattern(format), + format == null ? DateFieldMapper.getDefaultDateTimeFormatter() : DateFormatter.forPattern(format), tz == null ? ZoneOffset.UTC : tz, // If we were just looking at fields, we could read the resolution from the field settings, but we need to deal with script // output, which has no way to indicate the resolution, so we need to default to something. Milliseconds is the standard. diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index e3f914ca259f6..c866238d12fcb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Similar to {@link ValuesSourceAggregationBuilder}, except it references multiple ValuesSources (e.g. so that an aggregation * can pull values from multiple fields). - * + *

                            * A limitation of this class is that all the ValuesSource's being refereenced must be of the same type. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java index 59fa2e03f0bc3..33fefa57d50f0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java @@ -61,7 +61,7 @@ public enum ValueType implements Writeable { "date", "date", CoreValuesSourceType.DATE, - new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS) + new DocValueFormat.DateTime(DateFieldMapper.getDefaultDateTimeFormatter(), ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS) ), IP((byte) 6, "ip", "ip", CoreValuesSourceType.IP, DocValueFormat.IP), // TODO: what is the difference between "number" and "numeric"? diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 70382369d5615..7a73fafb4a809 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -264,7 +264,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { /** * DO NOT OVERRIDE THIS! - * + *

                            * This method only exists for legacy support. No new aggregations need this, nor should they override it. * * @param version For backwards compatibility, subclasses can change behavior based on the version diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java index 86102e63297d1..9158e9f59cab2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java @@ -43,14 +43,14 @@ * {@link ValuesSourceType} represents a collection of fields that share a common set of operations, for example all numeric fields. * Aggregations declare their support for a given ValuesSourceType (via {@link ValuesSourceRegistry.Builder#register}), * and should then not need to care about the fields which use that ValuesSourceType. - * + *

                            * ValuesSourceTypes provide a set of methods to instantiate concrete {@link ValuesSource} instances, based on the actual source of the * data for the aggregations. In general, aggregations should not call these methods, but rather rely on {@link ValuesSourceConfig} to have * selected the correct implementation. - * + *

                            * ValuesSourceTypes should be stateless. We recommend that plugins define an enum for their ValuesSourceTypes, even if the plugin only * intends to define one ValuesSourceType. ValuesSourceTypes are not serialized as part of the aggregations framework. - * + *

                            * Prefer reusing an existing ValuesSourceType (ideally from {@link CoreValuesSourceType}) over creating a new type. There are some cases * where creating a new type is necessary however. In particular, consider a new ValuesSourceType if the field has custom encoding/decoding * requirements; if the field needs to expose additional information to the aggregation (e.g. {@link ValuesSource.Range#rangeType()}); or diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index 4f6c2c327509d..ebf9623eb367a 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -255,8 +255,8 @@ boolean isNodeInDuress() { return isNodeInDuress; } - /** - * Returns true if the increase in heap usage is due to search requests. + /* + Returns true if the increase in heap usage is due to search requests. */ /** @@ -399,6 +399,7 @@ public SearchBackpressureStats nodeStats() { SearchTaskStats searchTaskStats = new SearchTaskStats( searchBackpressureStates.get(SearchTask.class).getCancellationCount(), searchBackpressureStates.get(SearchTask.class).getLimitReachedCount(), + searchBackpressureStates.get(SearchTask.class).getCompletionCount(), taskTrackers.get(SearchTask.class) .stream() .collect(Collectors.toUnmodifiableMap(t -> TaskResourceUsageTrackerType.fromName(t.name()), t -> t.stats(searchTasks))) @@ -407,6 +408,7 @@ public SearchBackpressureStats nodeStats() { SearchShardTaskStats searchShardTaskStats = new SearchShardTaskStats( searchBackpressureStates.get(SearchShardTask.class).getCancellationCount(), searchBackpressureStates.get(SearchShardTask.class).getLimitReachedCount(), + searchBackpressureStates.get(SearchShardTask.class).getCompletionCount(), taskTrackers.get(SearchShardTask.class) .stream() .collect(Collectors.toUnmodifiableMap(t -> TaskResourceUsageTrackerType.fromName(t.name()), t -> t.stats(searchShardTasks))) diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index d20e3e50d419f..79494eb0d3c24 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -54,8 +54,8 @@ private static class Defaults { /** * Defines the percentage of tasks to cancel relative to the number of successful task completions. * In other words, it is the number of tokens added to the bucket on each successful task completion. - * - * The setting below is deprecated. + *

                            + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_RATIO = Setting.doubleSetting( @@ -71,8 +71,8 @@ private static class Defaults { /** * Defines the number of tasks to cancel per unit time (in millis). * In other words, it is the number of tokens added to the bucket each millisecond. - * - * The setting below is deprecated. + *

                            + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_RATE = Setting.doubleSetting( @@ -86,8 +86,8 @@ private static class Defaults { /** * Defines the maximum number of tasks that can be cancelled before being rate-limited. - * - * The setting below is deprecated. + *

                            + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_BURST = Setting.doubleSetting( diff --git a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java index 678c19d83fb96..ffe97d125b27a 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java +++ b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java @@ -8,6 +8,7 @@ package org.opensearch.search.backpressure.stats; +import org.opensearch.Version; import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -30,21 +31,29 @@ public class SearchShardTaskStats implements ToXContentObject, Writeable { private final long cancellationCount; private final long limitReachedCount; + private final long completionCount; private final Map resourceUsageTrackerStats; public SearchShardTaskStats( long cancellationCount, long limitReachedCount, + long completionCount, Map resourceUsageTrackerStats ) { this.cancellationCount = cancellationCount; this.limitReachedCount = limitReachedCount; + this.completionCount = completionCount; this.resourceUsageTrackerStats = resourceUsageTrackerStats; } public SearchShardTaskStats(StreamInput in) throws IOException { this.cancellationCount = in.readVLong(); this.limitReachedCount = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + completionCount = in.readVLong(); + } else { + completionCount = -1; + } MapBuilder builder = new MapBuilder<>(); builder.put(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, in.readOptionalWriteable(CpuUsageTracker.Stats::new)); @@ -62,6 +71,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(entry.getKey().getName(), entry.getValue()); } builder.endObject(); + if (completionCount != -1) { + builder.field("completion_count", completionCount); + } builder.startObject("cancellation_stats") .field("cancellation_count", cancellationCount) @@ -75,6 +87,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cancellationCount); out.writeVLong(limitReachedCount); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(completionCount); + } out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER)); out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER)); @@ -88,11 +103,12 @@ public boolean equals(Object o) { SearchShardTaskStats that = (SearchShardTaskStats) o; return cancellationCount == that.cancellationCount && limitReachedCount == that.limitReachedCount + && completionCount == that.completionCount && resourceUsageTrackerStats.equals(that.resourceUsageTrackerStats); } @Override public int hashCode() { - return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats); + return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats, completionCount); } } diff --git 
a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java index 302350104bd3a..a7f9b4e3d004f 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java +++ b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java @@ -8,6 +8,7 @@ package org.opensearch.search.backpressure.stats; +import org.opensearch.Version; import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,21 +32,29 @@ public class SearchTaskStats implements ToXContentObject, Writeable { private final long cancellationCount; private final long limitReachedCount; + private final long completionCount; private final Map resourceUsageTrackerStats; public SearchTaskStats( long cancellationCount, long limitReachedCount, + long completionCount, Map resourceUsageTrackerStats ) { this.cancellationCount = cancellationCount; this.limitReachedCount = limitReachedCount; + this.completionCount = completionCount; this.resourceUsageTrackerStats = resourceUsageTrackerStats; } public SearchTaskStats(StreamInput in) throws IOException { this.cancellationCount = in.readVLong(); this.limitReachedCount = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.completionCount = in.readVLong(); + } else { + this.completionCount = -1; + } MapBuilder builder = new MapBuilder<>(); builder.put(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, in.readOptionalWriteable(CpuUsageTracker.Stats::new)); @@ -63,6 +72,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(entry.getKey().getName(), entry.getValue()); } builder.endObject(); + if (completionCount != -1) { + builder.field("completion_count", completionCount); + } builder.startObject("cancellation_stats") .field("cancellation_count", cancellationCount) @@ -76,6 +88,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cancellationCount); out.writeVLong(limitReachedCount); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(completionCount); + } out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER)); out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER)); @@ -89,11 +104,12 @@ public boolean equals(Object o) { SearchTaskStats that = (SearchTaskStats) o; return cancellationCount == that.cancellationCount && limitReachedCount == that.limitReachedCount + && completionCount == that.completionCount && resourceUsageTrackerStats.equals(that.resourceUsageTrackerStats); } @Override public int hashCode() { - return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats); + return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats, completionCount); } } diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java index fa30b2a5c7450..f3a1d5cafe755 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java @@ -88,7 +88,7 @@ public int docId() { /** * This lookup provides access to the source for the given hit document. 
Note * that it should always be set to the correct doc ID and {@link LeafReaderContext}. - * + *

                            * In most cases, the hit document's source is loaded eagerly at the start of the * {@link FetchPhase}. This lookup will contain the preloaded source. */ @@ -103,7 +103,7 @@ public IndexReader topLevelReader() { /** * Returns a {@link FetchSubPhaseProcessor} for this sub phase. - * + *

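The null-return contract spelled out just below can be illustrated with a minimal sub phase. The class name and the trigger condition are hypothetical; only the interface shape follows the surrounding code:

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.opensearch.search.fetch.FetchContext;
    import org.opensearch.search.fetch.FetchSubPhase;
    import org.opensearch.search.fetch.FetchSubPhaseProcessor;

    public final class NoopDebugSubPhase implements FetchSubPhase {
        private final boolean enabled;

        public NoopDebugSubPhase(boolean enabled) {
            this.enabled = enabled;
        }

        @Override
        public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) throws IOException {
            if (enabled == false) {
                return null;   // nothing to execute for this request
            }
            return new FetchSubPhaseProcessor() {
                @Override
                public void setNextReader(LeafReaderContext readerContext) {}

                @Override
                public void process(HitContext hitContext) {}
            };
        }
    }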
                            * If nothing should be executed for the provided {@code FetchContext}, then the * implementation should return {@code null} */ diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java index a8ab8c0dcb8a8..9b17d9dbcd8de 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java @@ -47,7 +47,7 @@ /** * Fetch sub phase which pulls data from doc values. - * + *

                            * Specifying {@code "docvalue_fields": ["field1", "field2"]} * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java index f50524244b115..cc941bb240b91 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java @@ -138,7 +138,7 @@ public SearchContext parentSearchContext() { /** * The _id of the root document. - * + *

                            * Since this ID is available on the context, inner hits can avoid re-loading the root _id. */ public String getId() { @@ -151,7 +151,7 @@ public void setId(String id) { /** * A source lookup for the root document. - * + *

                            * This shared lookup allows inner hits to avoid re-loading the root _source. */ public SourceLookup getRootLookup() { diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index aa86ed4e56801..a520086de8051 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -285,7 +285,7 @@ protected void search(List leaves, Weight weight, Collector c /** * Lower-level search API. - * + *

                            * {@link LeafCollector#collect(int)} is called for every matching document in * the provided ctx. */ diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index dce6da897a74b..4ec21c469d9d3 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -286,7 +286,7 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Indicates if the current index should perform frequent low level search cancellation check. - * + *

                            * Enabling low-level checks will make long running searches to react to the cancellation request faster. However, * since it will produce more cancellation checks it might slow the search performance down. */ diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java index 812030acb9561..734f62d048a5a 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java @@ -180,7 +180,7 @@ public List extractRawValues(String path) { /** * For the provided path, return its value in the source. - * + *

                            * Note that in contrast with {@link SourceLookup#extractRawValues}, array and object values * can be returned. * diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java index 9e5f2fa1592a4..bc82814e88bbd 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java @@ -31,7 +31,7 @@ /** * TODO: Copied verbatim from {@link org.opensearch.ingest.PipelineConfiguration}. - * + *

                            * See if we can refactor into a common class. I suspect not, just because this one will hold */ public class PipelineConfiguration extends AbstractDiffable implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/search/pipeline/Processor.java b/server/src/main/java/org/opensearch/search/pipeline/Processor.java index fb33f46acada4..0120d68ceb5aa 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Processor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Processor.java @@ -15,7 +15,7 @@ * Whether changes are made and what exactly is modified is up to the implementation. *

                            * Processors may get called concurrently and thus need to be thread-safe. - * + *

                            * TODO: Refactor {@link org.opensearch.ingest.Processor} to extend this interface, and specialize to IngestProcessor. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java index 0206b9b6cf716..0b80cdbef6669 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java +++ b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java @@ -18,7 +18,7 @@ /** * Information about a search pipeline processor - * + *

                            * TODO: This is copy/pasted from the ingest ProcessorInfo. * Can/should we share implementation or is this just boilerplate? * diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java index d1f525477f25f..904b04b249b1b 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java @@ -70,7 +70,7 @@ public AbstractInternalProfileTree() { * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those * that are past the rewrite phase and are now being wrapped by createWeight() ) follow * a recursive progression. We can track the dependency tree by a simple stack - * + *

                            * The only hiccup is that the first scoring query will be identical to the last rewritten * query, so we need to take special care to fix that * @@ -109,7 +109,7 @@ public PB getProfileBreakdown(E query) { /** * Helper method to add a new node to the dependency tree. - * + *

                            * Initializes a new list in the dependency tree, saves the query and * generates a new {@link AbstractProfileBreakdown} to track the timings * of this element. diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 8e87c7ff4acd4..68cf05c988b5b 100644 --- a/server/src/main/java/org/opensearch/search/profile/Profilers.java +++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java @@ -35,6 +35,9 @@ import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.profile.aggregation.AggregationProfiler; import org.opensearch.search.profile.aggregation.ConcurrentAggregationProfiler; +import org.opensearch.search.profile.query.ConcurrentQueryProfileTree; +import org.opensearch.search.profile.query.ConcurrentQueryProfiler; +import org.opensearch.search.profile.query.InternalQueryProfileTree; import org.opensearch.search.profile.query.QueryProfiler; import java.util.ArrayList; @@ -64,7 +67,9 @@ public Profilers(ContextIndexSearcher searcher, boolean isConcurrentSegmentSearc /** Switch to a new profile. */ public QueryProfiler addQueryProfiler() { - QueryProfiler profiler = new QueryProfiler(isConcurrentSegmentSearchEnabled); + QueryProfiler profiler = isConcurrentSegmentSearchEnabled + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); queryProfilers.add(profiler); return profiler; diff --git a/server/src/main/java/org/opensearch/search/profile/Timer.java b/server/src/main/java/org/opensearch/search/profile/Timer.java index 172762cabeb6a..864c689cf7fa0 100644 --- a/server/src/main/java/org/opensearch/search/profile/Timer.java +++ b/server/src/main/java/org/opensearch/search/profile/Timer.java @@ -53,6 +53,18 @@ public class Timer { private boolean doTiming; private long timing, count, lastCount, start, earliestTimerStartTime; + public Timer() { + this(0, 0, 0, 0, 0); + } + + public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime) { + this.timing = timing; + this.count = count; + this.lastCount = lastCount; + this.start = start; + this.earliestTimerStartTime = earliestTimerStartTime; + } + /** pkg-private for testing */ long nanoTime() { return System.nanoTime(); diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java index 8e825def13f5d..2f5d632ee2d87 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java @@ -54,14 +54,11 @@ public void startRewriteTime() { * startRewriteTime() must be called for a particular context prior to calling * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and * nonsensical - * - * @return The elapsed time */ - public long stopAndAddRewriteTime() { + public void stopAndAddRewriteTime() { long time = Math.max(1, System.nanoTime() - rewriteScratch); rewriteTime += time; rewriteScratch = 0; - return time; } public long getRewriteTime() { diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index e567fdd2d436c..59ef01f9f947a 100644 --- 
a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -70,7 +70,7 @@ public Map toBreakdownMap() { ); final long createWeightTime = topLevelBreakdownMapWithWeightTime.get(QueryTimingType.CREATE_WEIGHT.toString()); - if (sliceCollectorsToLeaves.isEmpty() || contexts.isEmpty()) { + if (contexts.isEmpty()) { // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the // create_weight time/count queryNodeTime = createWeightTime; @@ -78,6 +78,21 @@ public Map toBreakdownMap() { minSliceNodeTime = 0L; avgSliceNodeTime = 0L; return buildDefaultQueryBreakdownMap(createWeightTime); + } else if (sliceCollectorsToLeaves.isEmpty()) { + // This will happen when each slice executes search leaf for its leaves and query is rewritten for the leaf being searched. It + // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for + // the new rewritten query. The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later + // in search leaf path which doesn't have collector. Also, this is not needed since this breakdown is per leaf and there is no + // concurrency involved. An empty sliceCollectorsToLeaves could also happen in the case of early termination. + AbstractProfileBreakdown breakdown = contexts.values().iterator().next(); + queryNodeTime = breakdown.toNodeTime() + createWeightTime; + maxSliceNodeTime = 0L; + minSliceNodeTime = 0L; + avgSliceNodeTime = 0L; + Map queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap()); + queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime); + queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L); + return queryBreakdownMap; } // first create the slice level breakdowns @@ -191,10 +206,12 @@ Map> buildSliceLevelBreakdown() { } // compute sliceMaxEndTime as max of sliceEndTime across all timing types sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE)); - sliceMinStartTime = Math.min( - sliceMinStartTime, - currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE) - ); + long currentSliceStartTime = currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE); + if (currentSliceStartTime == 0L) { + // The timer for the current timing type never starts, so we continue here + continue; + } + sliceMinStartTime = Math.min(sliceMinStartTime, currentSliceStartTime); // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime currentSliceBreakdown.put( timingType.toString(), diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java new file mode 100644 index 0000000000000..42bf23bb13fbe --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.search.Query; +import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileResult; +import org.opensearch.search.profile.Timer; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * This class acts as a thread-local storage for profiling a query with concurrent execution + * + * @opensearch.internal + */ +public final class ConcurrentQueryProfiler extends QueryProfiler { + + private final Map threadToProfileTree; + // The LinkedList does not need to be thread safe, as the map associates thread IDs with LinkedList, and only + // one thread will access the LinkedList at a time. + private final Map> threadToRewriteTimers; + + public ConcurrentQueryProfiler(AbstractQueryProfileTree profileTree) { + super(profileTree); + long threadId = getCurrentThreadId(); + // We utilize LinkedHashMap to preserve the insertion order of the profiled queries + threadToProfileTree = Collections.synchronizedMap(new LinkedHashMap<>()); + threadToProfileTree.put(threadId, (ConcurrentQueryProfileTree) profileTree); + threadToRewriteTimers = new ConcurrentHashMap<>(); + threadToRewriteTimers.put(threadId, new LinkedList<>()); + } + + @Override + public ContextualProfileBreakdown getQueryBreakdown(Query query) { + ConcurrentQueryProfileTree profileTree = threadToProfileTree.computeIfAbsent( + getCurrentThreadId(), + k -> new ConcurrentQueryProfileTree() + ); + return profileTree.getProfileBreakdown(query); + } + + /** + * Removes the last (e.g. most recent) element on ConcurrentQueryProfileTree stack. 
+ */ + @Override + public void pollLastElement() { + ConcurrentQueryProfileTree concurrentProfileTree = threadToProfileTree.get(getCurrentThreadId()); + if (concurrentProfileTree != null) { + concurrentProfileTree.pollLast(); + } + } + + /** + * @return a hierarchical representation of the profiled tree + */ + @Override + public List getTree() { + List profileResults = new ArrayList<>(); + for (Map.Entry profile : threadToProfileTree.entrySet()) { + profileResults.addAll(profile.getValue().getTree()); + } + return profileResults; + } + + /** + * Begin timing the rewrite phase of a request + */ + @Override + public void startRewriteTime() { + Timer rewriteTimer = new Timer(); + threadToRewriteTimers.computeIfAbsent(getCurrentThreadId(), k -> new LinkedList<>()).add(rewriteTimer); + rewriteTimer.start(); + } + + /** + * Stop recording the current rewrite timer + */ + public void stopAndAddRewriteTime() { + Timer rewriteTimer = threadToRewriteTimers.get(getCurrentThreadId()).getLast(); + rewriteTimer.stop(); + } + + /** + * @return total time taken to rewrite all queries in this concurrent query profiler + */ + @Override + public long getRewriteTime() { + long totalRewriteTime = 0L; + List rewriteTimers = new LinkedList<>(); + threadToRewriteTimers.values().forEach(rewriteTimers::addAll); + LinkedList mergedIntervals = mergeRewriteTimeIntervals(rewriteTimers); + for (long[] interval : mergedIntervals) { + totalRewriteTime += interval[1] - interval[0]; + } + return totalRewriteTime; + } + + // package private for unit testing + LinkedList mergeRewriteTimeIntervals(List timers) { + LinkedList mergedIntervals = new LinkedList<>(); + timers.sort(Comparator.comparingLong(Timer::getEarliestTimerStartTime)); + for (Timer timer : timers) { + long startTime = timer.getEarliestTimerStartTime(); + long endTime = startTime + timer.getApproximateTiming(); + if (mergedIntervals.isEmpty() || mergedIntervals.getLast()[1] < startTime) { + long[] interval = new long[2]; + interval[0] = startTime; + interval[1] = endTime; + mergedIntervals.add(interval); + } else { + mergedIntervals.getLast()[1] = Math.max(mergedIntervals.getLast()[1], endTime); + } + } + return mergedIntervals; + } + + private long getCurrentThreadId() { + return Thread.currentThread().getId(); + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java index 8b860c3a58cea..024d91a8e2ed2 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java @@ -44,11 +44,12 @@ /** * This class wraps a Lucene Collector and times the execution of: - * - setScorer() - * - collect() - * - doSetNextReader() - * - needsScores() - * + *

+ * <ul>
+ * <li>setScorer()</li>
+ * <li>collect()</li>
+ * <li>doSetNextReader()</li>
+ * <li>needsScores()</li>
+ * </ul>
                            * InternalProfiler facilitates the linking of the Collector graph * * @opensearch.internal @@ -117,7 +118,7 @@ public Collector getCollector() { /** * Creates a human-friendly representation of the Collector name. - * + *

                            * InternalBucket Collectors use the aggregation name in their toString() method, * which makes the profiled output a bit nicer. * diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java index b1024c2312c50..332c4b3551450 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -44,22 +44,22 @@ * "online" as the weights are wrapped by ContextIndexSearcher. This allows us * to know the relationship between nodes in tree without explicitly * walking the tree or pre-wrapping everything - * + *

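For context, the Profilers change earlier in this diff is what decides which of the two implementations gets constructed; restated on its own:

    // From Profilers#addQueryProfiler() as modified in this diff
    QueryProfiler profiler = isConcurrentSegmentSearchEnabled
        ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
        : new QueryProfiler(new InternalQueryProfileTree());
    searcher.setProfiler(profiler);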
                            * A Profiler is associated with every Search, not per Search-Request. E.g. a * request may execute two searches (query + global agg). A Profiler just * represents one of those * * @opensearch.internal */ -public final class QueryProfiler extends AbstractProfiler, Query> { +public class QueryProfiler extends AbstractProfiler, Query> { /** * The root Collector used in the search */ private InternalProfileComponent collector; - public QueryProfiler(boolean concurrent) { - super(concurrent ? new ConcurrentQueryProfileTree() : new InternalQueryProfileTree()); + public QueryProfiler(AbstractQueryProfileTree profileTree) { + super(profileTree); } /** Set the collector that is associated with this profiler. */ @@ -81,14 +81,14 @@ public void startRewriteTime() { /** * Stop recording the current rewrite and add it's time to the total tally, returning the * cumulative time so far. - * - * @return cumulative rewrite time */ - public long stopAndAddRewriteTime() { - return ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime(); + public void stopAndAddRewriteTime() { + ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime(); } /** + * The rewriting process is complex and hard to display because queries can undergo significant changes. + * Instead of showing intermediate results, we display the cumulative time for the non-concurrent search case. * @return total time taken to rewrite all queries in this profile */ public long getRewriteTime() { diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index 7de605a244d09..f8a1e99ff585f 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -123,7 +123,7 @@ public static QuerySearchResult nullInstance() { * Returns true if the result doesn't contain any useful information. * It is used by the search action to avoid creating an empty response on * shard request that rewrites to match_no_docs. - * + *

                            * TODO: Currently we need the concrete aggregators to build empty responses. This means that we cannot * build an empty response in the coordinating node so we rely on this hack to ensure that at least one shard * returns a valid empty response. We should move the ability to create empty responses to aggregation builders diff --git a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java index 33f8e5e7b535d..ae025f70c95b3 100644 --- a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java +++ b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java @@ -41,7 +41,7 @@ /** * A query rescorer interface used to re-rank the Top-K results of a previously * executed search. - * + *

                            * Subclasses should borrow heavily from {@link QueryRescorer} because it is * fairly well behaved and documents that tradeoffs that it is making. There * is also an {@code ExampleRescorer} that is worth looking at. diff --git a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java index d7d554c058c37..856e103193463 100644 --- a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java @@ -49,7 +49,7 @@ /** * A {@link SliceQuery} that uses the numeric doc values of a field to do the slicing. - * + *

                            * NOTE: With deterministic field values this query can be used across different readers safely. * If updates are accepted on the field you must ensure that the same reader is used for all `slice` queries. * diff --git a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java index 297020fe2fe4d..05f36b0d6f3cf 100644 --- a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java @@ -53,7 +53,7 @@ /** * A {@link SliceQuery} that uses the terms dictionary of a field to do the slicing. - * + *

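Both slice queries are reached through the search-request slice option. A sketch of the caller side, with request values that are only illustrative:

    import org.opensearch.search.builder.SearchSourceBuilder;
    import org.opensearch.search.slice.SliceBuilder;

    // Slice 0 of 4: by default slicing is based on the _id terms (TermsSliceQuery); naming a numeric
    // doc-values field on the SliceBuilder would exercise DocValuesSliceQuery instead.
    SearchSourceBuilder source = new SearchSourceBuilder()
        .slice(new SliceBuilder(0, 4))
        .size(1000);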
                            * NOTE: The cost of this filter is O(N*M) where N is the number of unique terms in the dictionary * and M is the average number of documents per term. * For each segment this filter enumerates the terms dictionary, computes the hash code for each term and fills diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 17e5b44ec89e7..abd73b45ecc13 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -263,7 +263,7 @@ public void addTerm(T entry) { /** * Returns a integer representing the type of the suggestion. This is used for * internal serialization over the network. - * + *

                            * This class is now serialized as a NamedWriteable and this method only remains for backwards compatibility */ @Deprecated diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java index 96e47cf7c8000..e3e6cad65be62 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java @@ -63,14 +63,14 @@ /** * Suggestion response for {@link CompletionSuggester} results - * + *

                            * Response format for each entry: * { * "text" : STRING * "score" : FLOAT * "contexts" : CONTEXTS * } - * + *

                            * CONTEXTS : { * "CONTEXT_NAME" : ARRAY, * .. diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java index 56cc8fbfbcf66..139742f84b80b 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java @@ -142,7 +142,7 @@ public int getEditDistance() { /** * Returns if transpositions option is set - * + *

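The transpositions flag described just below is set through the options builder; a small sketch, assuming only the builder calls shown here:

    import org.opensearch.search.suggest.completion.FuzzyOptions;

    // With transpositions on, "nwe" -> "new" counts as a single edit (one swap) instead of two
    FuzzyOptions fuzzy = FuzzyOptions.builder()
        .setTranspositions(true)
        .build();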
                            * if transpositions is set, then swapping one character for another counts as one edit instead of two. */ public boolean isTranspositions() { diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index 4fbd661037aa9..fbc39536502de 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -43,7 +43,7 @@ /** * * Extension of the {@link TopSuggestDocsCollector} that returns top documents from the completion suggester. - * + *

                            * This collector groups suggestions coming from the same document but matching different contexts * or surface form together. When different contexts or surface forms match the same suggestion form only * the best one per document (sorted by weight) is kept. @@ -55,7 +55,7 @@ class TopSuggestGroupDocsCollector extends TopSuggestDocsCollector { /** * Sole constructor - * + *

                            * Collects at most num completions * with corresponding document and weight */ diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java index 61e60293f4943..94707ff2b4569 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java @@ -54,7 +54,7 @@ /** * A {@link ContextMapping} defines criteria that can be used to * filter and/or boost suggestions at query time for {@link CompletionFieldMapper}. - * + *

                            * Implementations have to define how contexts are parsed at query/index time * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java index 0f5781fefcf07..cff5a901a473f 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java @@ -124,11 +124,11 @@ public Iterator> iterator() { * Field prepends context values with a suggestion * Context values are associated with a type, denoted by * a type id, which is prepended to the context value. - * + *
<p>
                            * Every defined context mapping yields a unique type id (index of the * corresponding context mapping in the context mappings list) * for all its context values - * + *
<p>
                            * The type, context and suggestion values are encoded as follows: *
<p>
                            * TYPE_ID | CONTEXT_VALUE | CONTEXT_SEP | SUGGESTION_VALUE @@ -209,7 +209,7 @@ public ContextQuery toContextQuery(CompletionQuery query, Map * see {@link org.opensearch.search.suggest.completion.context.ContextMappings.TypedContextField} * @return a map of context names and their values * @@ -232,7 +232,7 @@ public Map> getNamedContexts(List contexts) { /** * Loads {@link ContextMappings} from configuration - * + *
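A minimal sketch (an editorial aside, not part of the diff) of the typed-context layout the ContextMappings javadoc above describes; the single-char type-id prefix and the separator constant are assumptions made only for illustration.

    // Hypothetical helper mirroring the documented layout: TYPE_ID | CONTEXT_VALUE | CONTEXT_SEP | SUGGESTION_VALUE
    final class TypedContextEncodingSketch {
        private static final char CONTEXT_SEP = '\u001D'; // assumed separator, for illustration only

        static String encode(int typeId, String contextValue, String suggestionValue) {
            // typeId is the index of the context mapping in the mappings list; it prefixes the context value.
            return new StringBuilder().append((char) typeId)
                .append(contextValue)
                .append(CONTEXT_SEP)
                .append(suggestionValue)
                .toString();
        }
    }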
<p>
                            * Expected configuration: * List of maps representing {@link ContextMapping} * [{"name": .., "type": .., ..}, {..}] @@ -286,7 +286,7 @@ private static String extractRequiredValue(Map contextConfig, St /** * Writes a list of objects specified by the defined {@link ContextMapping}s - * + *
<p>
                            * see {@link ContextMapping#toXContent(XContentBuilder, Params)} */ @Override diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index 17f858b0b1302..0e29e928df760 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -71,7 +71,7 @@ * The suggestions can be boosted and/or filtered depending on * whether it falls within an area, represented by a query geo hash * with a specified precision - * + *
<p>
                            * {@link GeoQueryContext} defines the options for constructing * a unit of query context for this context type * diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java index 6335629f61cf1..1a00cb9465771 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -147,9 +147,9 @@ public TermStats internalTermStats(BytesRef term) throws IOException { if (termsEnum.seekExact(term)) { return new TermStats( termsEnum.docFreq(), - /** - * We use the {@link TermsEnum#docFreq()} for fields that don't - * record the {@link TermsEnum#totalTermFreq()}. + /* + We use the {@link TermsEnum#docFreq()} for fields that don't + record the {@link TermsEnum#totalTermFreq()}. */ termsEnum.totalTermFreq() == -1 ? termsEnum.docFreq() : termsEnum.totalTermFreq() ); @@ -168,10 +168,10 @@ public CandidateSet drawCandidates(CandidateSet set) throws IOException { float origThreshold = spellchecker.getThresholdFrequency(); try { if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { - /** - * We use the {@link TermStats#docFreq} to compute the frequency threshold - * because that's what {@link DirectSpellChecker#suggestSimilar} expects - * when filtering terms. + /* + We use the {@link TermStats#docFreq} to compute the frequency threshold + because that's what {@link DirectSpellChecker#suggestSimilar} expects + when filtering terms. */ int threshold = thresholdTermFrequency(original.termStats.docFreq); if (threshold == Integer.MAX_VALUE) { diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java index f1dba9793ba9e..bc942da738c7e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java @@ -70,7 +70,7 @@ public final class LinearInterpolation extends SmoothingModel { /** * Creates a linear interpolation smoothing model. - * + *
<p>
                            * Note: the lambdas must sum up to one. * * @param trigramLambda diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java index e37d964cc0424..a6bfb880cf249 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -222,7 +222,7 @@ public Integer gramSize() { * misspellings in order to form a correction. This method accepts a float * value in the range [0..1) as a fraction of the actual query terms a * number {@code >=1} as an absolute number of query terms. - * + *
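A short illustration (not part of the diff) of the max_errors semantics described above; it assumes the usual SuggestBuilders entry point, and the field name and suggest text are invented for the example.

    import org.opensearch.search.suggest.SuggestBuilders;
    import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder;

    class MaxErrorsSketch {
        // 0.5 is a fraction: with a four-term query, up to two terms may be misspelled.
        static PhraseSuggestionBuilder fractional() {
            return SuggestBuilders.phraseSuggestion("title").text("noble prize in phisics").maxErrors(0.5f);
        }

        // A value >= 1 is an absolute count: at most two misspelled terms regardless of query length.
        static PhraseSuggestionBuilder absolute() {
            return SuggestBuilders.phraseSuggestion("title").text("noble prize in phisics").maxErrors(2.0f);
        }
    }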
<p>
                            * The default is set to {@code 1.0} which corresponds to that only * corrections with at most 1 misspelled term are returned. */ diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index af2f925f89726..1c25d8c71f948 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.SnapshotsInProgress; @@ -73,6 +74,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -401,18 +403,32 @@ private void snapshot( try { if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); + long primaryTerm = indexShard.getOperationPrimaryTerm(); // we flush first to make sure we get the latest writes snapshotted wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - long primaryTerm = indexShard.getOperationPrimaryTerm(); - final IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); + IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); long commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + try { + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (NoSuchFileException e) { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } try { repository.snapshotRemoteStoreIndexShard( indexShard.store(), snapshot.getSnapshotId(), indexId, - wrappedSnapshot.get(), + snapshotIndexCommit, getShardStateId(indexShard, snapshotIndexCommit), snapshotStatus, primaryTerm, diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 1f063bfed8cfe..71918bc73b55a 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -1565,7 +1565,7 @@ private void runNextQueuedOperation(RepositoryData repositoryData, String reposi /** * Runs a cluster state update that checks whether we have outstanding snapshot deletions that can be executed and executes them. - * + *
<p>
                            * TODO: optimize this to execute in a single CS update together with finalizing the latest snapshot */ private void runReadyDeletions(RepositoryData repositoryData, String repository) { @@ -2758,7 +2758,7 @@ public boolean assertAllListenersResolved() { * Every shard snapshot or clone state update can result in multiple snapshots being updated. In order to determine whether or not a * shard update has an effect we use an outer loop over all current executing snapshot operations that iterates over them in the order * they were started in and an inner loop over the list of shard update tasks. - * + *
<p>
                            * If the inner loop finds that a shard update task applies to a given snapshot and either a shard-snapshot or shard-clone operation in * it then it will update the state of the snapshot entry accordingly. If that update was a noop, then the task is removed from the * iteration as it was already applied before and likely just arrived on the cluster-manager node again due to retries upstream. @@ -2768,7 +2768,7 @@ public boolean assertAllListenersResolved() { * a task in the executed tasks collection applied to a shard it was waiting for to become available, then the shard snapshot operation * will be started for that snapshot entry and the task removed from the collection of tasks that need to be applied to snapshot * entries since it can not have any further effects. - * + *
<p>
                            * Package private to allow for tests. */ static final ClusterStateTaskExecutor SHARD_STATE_EXECUTOR = new ClusterStateTaskExecutor() { @@ -3058,7 +3058,7 @@ private static ShardSnapshotStatus startShardSnapshotAfterClone(ClusterState cur /** * An update to the snapshot state of a shard. - * + *
<p>
                            * Package private for testing */ static final class ShardSnapshotUpdate { diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index dcd7fc581a8fd..ee2eae60eae8f 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -550,11 +550,11 @@ public int incrementResourceTrackingThreads() { * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's * own thread as complete). When the active thread count becomes zero, the onTaskResourceTrackingCompleted method * is called exactly once on all registered listeners. - * + *
<p>
                            * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). * This ensures that the number of active threads doesn't drop to zero pre-maturely. - * + *
<p>
                            * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed * with the response, any thread usage info captured after the task is unregistered may be irrelevant. * diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java index 9b61b2454b53c..2d152e513f197 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -18,7 +18,7 @@ /** * TaskCancellation represents a task eligible for cancellation. * It doesn't guarantee that the task will actually get cancelled or not; that decision is left to the caller. - * + *
<p>
                            * It contains a list of cancellation reasons along with callbacks that are invoked when cancel() is called. * * @opensearch.internal @@ -87,7 +87,7 @@ private void runOnCancelCallbacks() { /** * Returns the sum of all cancellation scores. - * + *
<p>
                            * A zero score indicates no reason to cancel the task. * A task with a higher score suggests greater possibility of recovering the node when it is cancelled. */ diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java index dc0b04244296f..24dcab98c8870 100644 --- a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; /** * Wrapper class to encapsulate tracing related settings @@ -27,6 +28,20 @@ public class TelemetrySettings { Setting.Property.Dynamic ); + public static final Setting TRACER_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.tracer.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + public static final Setting METRICS_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + /** * Probability of sampler */ @@ -39,12 +54,27 @@ public class TelemetrySettings { Setting.Property.Dynamic ); + /** + * metrics publish interval in seconds. + */ + public static final Setting METRICS_PUBLISH_INTERVAL_SETTING = Setting.timeSetting( + "telemetry.otel.metrics.publish.interval", + TimeValue.timeValueSeconds(60), + Setting.Property.NodeScope, + Setting.Property.Final + ); + private volatile boolean tracingEnabled; private volatile double samplingProbability; + private final boolean tracingFeatureEnabled; + private final boolean metricsFeatureEnabled; + public TelemetrySettings(Settings settings, ClusterSettings clusterSettings) { this.tracingEnabled = TRACER_ENABLED_SETTING.get(settings); this.samplingProbability = TRACER_SAMPLER_PROBABILITY.get(settings); + this.tracingFeatureEnabled = TRACER_FEATURE_ENABLED_SETTING.get(settings); + this.metricsFeatureEnabled = METRICS_FEATURE_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(TRACER_ENABLED_SETTING, this::setTracingEnabled); clusterSettings.addSettingsUpdateConsumer(TRACER_SAMPLER_PROBABILITY, this::setSamplingProbability); @@ -72,4 +102,12 @@ public void setSamplingProbability(double samplingProbability) { public double getSamplingProbability() { return samplingProbability; } + + public boolean isTracingFeatureEnabled() { + return tracingFeatureEnabled; + } + + public boolean isMetricsFeatureEnabled() { + return metricsFeatureEnabled; + } } diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java b/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java new file mode 100644 index 0000000000000..c7e2229c18437 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Optional; + +/** + * {@link MetricsRegistryFactory} represents a single global class that is used to access {@link MetricsRegistry}s. + *
<p>
                            + * The {@link MetricsRegistry} singleton object can be retrieved using MetricsRegistryFactory::getMetricsRegistry. The {@link MetricsRegistryFactory} object + * is created during class initialization and cannot subsequently be changed. + * + * @opensearch.internal + */ +@InternalApi +public class MetricsRegistryFactory implements Closeable { + + private static final Logger logger = LogManager.getLogger(MetricsRegistryFactory.class); + + private final TelemetrySettings telemetrySettings; + private final MetricsRegistry metricsRegistry; + + public MetricsRegistryFactory(TelemetrySettings telemetrySettings, Optional telemetry) { + this.telemetrySettings = telemetrySettings; + this.metricsRegistry = metricsRegistry(telemetry); + } + + /** + * Returns the {@link MetricsRegistry} instance + * + * @return MetricsRegistry instance + */ + public MetricsRegistry getMetricsRegistry() { + return metricsRegistry; + } + + /** + * Closes the {@link Tracer} + */ + @Override + public void close() { + try { + metricsRegistry.close(); + } catch (IOException e) { + logger.warn("Error closing MetricsRegistry", e); + } + } + + private MetricsRegistry metricsRegistry(Optional telemetry) { + MetricsRegistry metricsRegistry = telemetry.map(Telemetry::getMetricsTelemetry) + .map(metricsTelemetry -> createDefaultMetricsRegistry(metricsTelemetry)) + .orElse(NoopMetricsRegistry.INSTANCE); + return metricsRegistry; + } + + private MetricsRegistry createDefaultMetricsRegistry(MetricsTelemetry metricsTelemetry) { + return new DefaultMetricsRegistry(metricsTelemetry); + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java b/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java new file mode 100644 index 0000000000000..5137cb18e2cc0 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; + +import java.util.Optional; + +/** + * No-op implementation of {@link MetricsRegistryFactory} + * + * @opensearch.internal + */ +@InternalApi +public class NoopMetricsRegistryFactory extends MetricsRegistryFactory { + public NoopMetricsRegistryFactory() { + super(null, Optional.empty()); + } + + @Override + public MetricsRegistry getMetricsRegistry() { + return NoopMetricsRegistry.INSTANCE; + } + + @Override + public void close() { + + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java new file mode 100644 index 0000000000000..ad4564e1d7773 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for telemetry. 
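A minimal usage sketch (not part of the diff) for the factory added above; the TelemetrySettings instance and the optional Telemetry implementation are assumed to be supplied by the caller.

    import java.util.Optional;

    import org.opensearch.telemetry.Telemetry;
    import org.opensearch.telemetry.TelemetrySettings;
    import org.opensearch.telemetry.metrics.MetricsRegistry;
    import org.opensearch.telemetry.metrics.MetricsRegistryFactory;

    class MetricsRegistrySketch {
        static MetricsRegistry build(TelemetrySettings telemetrySettings, Optional<Telemetry> telemetry) {
            // Resolves to a real registry when a Telemetry implementation is present,
            // otherwise falls back to NoopMetricsRegistry.INSTANCE.
            MetricsRegistryFactory factory = new MetricsRegistryFactory(telemetrySettings, telemetry);
            return factory.getMetricsRegistry();
        }
    }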
+ */ +package org.opensearch.telemetry.metrics; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java index e86b21ae0fd3b..b6b2cf360d1c5 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -60,8 +60,38 @@ private AttributeNames() { */ public static final String TRANSPORT_TARGET_HOST = "target_host"; + /** + * Transport Service send request local host. + */ + public static final String TRANSPORT_HOST = "host"; + /** * Action Name. */ public static final String TRANSPORT_ACTION = "action"; + + /** + * Index Name + */ + public static final String INDEX = "index"; + + /** + * Shard ID + */ + public static final String SHARD_ID = "shard_id"; + + /** + * Number of request items in bulk request + */ + public static final String BULK_REQUEST_ITEMS = "bulk_request_items"; + + /** + * Node ID + */ + public static final String NODE_ID = "node_id"; + + /** + * Refresh Policy + */ + public static final String REFRESH_POLICY = "refresh_policy"; } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index a93ce11f374fe..1dce422943b7a 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -8,11 +8,14 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.action.bulk.BulkShardRequest; +import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.common.annotation.InternalApi; import org.opensearch.core.common.Strings; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.transport.TcpChannel; import org.opensearch.transport.Transport; import java.util.Arrays; @@ -67,6 +70,10 @@ public static SpanCreationContext from(String action, Transport.Connection conne return SpanCreationContext.server().name(createSpanName(action, connection)).attributes(buildSpanAttributes(action, connection)); } + public static SpanCreationContext from(String spanName, String nodeId, ReplicatedWriteRequest request) { + return SpanCreationContext.server().name(spanName).attributes(buildSpanAttributes(nodeId, request)); + } + private static String createSpanName(HttpRequest httpRequest) { return httpRequest.method().name() + SEPARATOR + httpRequest.uri(); } @@ -127,4 +134,40 @@ private static Attributes buildSpanAttributes(String action, Transport.Connectio return attributes; } + /** + * Creates {@link SpanCreationContext} from Inbound Handler. + * @param action action. + * @param tcpChannel tcp channel. + * @return context + */ + public static SpanCreationContext from(String action, TcpChannel tcpChannel) { + return SpanCreationContext.server().name(createSpanName(action, tcpChannel)).attributes(buildSpanAttributes(action, tcpChannel)); + } + + private static String createSpanName(String action, TcpChannel tcpChannel) { + return action + SEPARATOR + (tcpChannel.getRemoteAddress() != null + ? 
tcpChannel.getRemoteAddress().getHostString() + : tcpChannel.getLocalAddress().getHostString()); + } + + private static Attributes buildSpanAttributes(String action, TcpChannel tcpChannel) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, action); + attributes.addAttribute(AttributeNames.TRANSPORT_HOST, tcpChannel.getLocalAddress().getHostString()); + return attributes; + } + + private static Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequest request) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.NODE_ID, nodeId) + .addAttribute(AttributeNames.REFRESH_POLICY, request.getRefreshPolicy().getValue()); + if (request.shardId() != null) { + attributes.addAttribute(AttributeNames.INDEX, request.shardId().getIndexName()) + .addAttribute(AttributeNames.SHARD_ID, request.shardId().getId()); + } + if (request instanceof BulkShardRequest) { + attributes.addAttribute(AttributeNames.BULK_REQUEST_ITEMS, ((BulkShardRequest) request).items().length); + } + return attributes; + } + } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java index b2308402379ac..631fb8242d78e 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java @@ -59,6 +59,11 @@ public SpanScope withSpanInScope(Span span) { return getDelegateTracer().withSpanInScope(span); } + @Override + public boolean isRecording() { + return getDelegateTracer().isRecording(); + } + @Override public void close() throws IOException { defaultTracer.close(); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java index 9229d334dea01..e0fb690bd29be 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing.channels; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; @@ -18,6 +17,7 @@ import java.net.InetSocketAddress; import java.util.Objects; +import java.util.Optional; /** * Tracer wrapped {@link HttpChannel} @@ -49,13 +49,20 @@ private TraceableHttpChannel(HttpChannel delegate, Span span, Tracer tracer) { * @return http channel */ public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableHttpChannel(delegate, span, tracer); } else { return delegate; } } + @Override + public void handleException(Exception ex) { + span.addEvent("The HttpChannel was closed without sending the response"); + span.setError(ex); + span.endSpan(); + } + @Override public void close() { delegate.close(); @@ -85,4 +92,9 @@ public InetSocketAddress getLocalAddress() { public InetSocketAddress getRemoteAddress() { return delegate.getRemoteAddress(); } + + @Override + public Optional get(String name, Class clazz) { + return delegate.get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java 
b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java index d256c9d4d0e53..32769dd1d848d 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java @@ -9,7 +9,6 @@ package org.opensearch.telemetry.tracing.channels; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.RestChannel; @@ -53,7 +52,7 @@ private TraceableRestChannel(RestChannel delegate, Span span, Tracer tracer) { * @return rest channel */ public static RestChannel create(RestChannel delegate, Span span, Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableRestChannel(delegate, span, tracer); } else { return delegate; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java new file mode 100644 index 0000000000000..45268b4807cd9 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.Version; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.transport.BaseTcpTransportChannel; +import org.opensearch.transport.TcpTransportChannel; +import org.opensearch.transport.TransportChannel; + +import java.io.IOException; +import java.util.Optional; + +/** + * Tracer wrapped {@link TransportChannel} + */ +public class TraceableTcpTransportChannel extends BaseTcpTransportChannel { + + private final TransportChannel delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + public TraceableTcpTransportChannel(TcpTransportChannel delegate, Span span, Tracer tracer) { + super(delegate.getChannel()); + this.delegate = delegate; + this.span = span; + this.tracer = tracer; + } + + /** + * Factory method. 
+ * + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return transport channel + */ + public static TransportChannel create(TcpTransportChannel delegate, final Span span, final Tracer tracer) { + if (tracer.isRecording() == true) { + delegate.getChannel().addCloseListener(new ActionListener() { + @Override + public void onResponse(Void unused) { + onFailure(null); + } + + @Override + public void onFailure(Exception e) { + span.addEvent("The TransportChannel was closed without sending the response"); + span.setError(e); + span.endSpan(); + } + }); + + return new TraceableTcpTransportChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public String getProfileName() { + return delegate.getProfileName(); + } + + @Override + public String getChannelType() { + return delegate.getChannelType(); + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(response); + } catch (final IOException ex) { + span.setError(ex); + throw ex; + } finally { + span.endSpan(); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(exception); + } finally { + span.setError(exception); + span.endSpan(); + } + } + + @Override + public Version getVersion() { + return delegate.getVersion(); + } + + @Override + public Optional get(String name, Class clazz) { + return delegate.get(name, clazz); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java index abddfcc6cebc1..eb9d53d2df51b 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing.handler; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.transport.TransportResponse; import org.opensearch.telemetry.tracing.Span; @@ -55,7 +54,7 @@ public static TransportResponseHandler create( Span span, Tracer tracer ) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableTransportResponseHandler(delegate, span, tracer); } else { return delegate; @@ -101,6 +100,7 @@ public void handleRejection(Exception exp) { try (SpanScope scope = tracer.withSpanInScope(span)) { delegate.handleRejection(exp); } finally { + span.setError(exp); span.endSpan(); } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java index 3e201641a529b..0cb4ce71d05f8 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing.listener; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanScope; @@ -47,7 +46,7 @@ 
private TraceableActionListener(ActionListener delegate, Span span, Tr * @return action listener */ public static ActionListener create(ActionListener delegate, Span span, Tracer tracer) { - if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + if (tracer.isRecording() == true) { return new TraceableActionListener(delegate, span, tracer); } else { return delegate; diff --git a/server/src/main/java/org/opensearch/threadpool/Scheduler.java b/server/src/main/java/org/opensearch/threadpool/Scheduler.java index 86c322ec89dd7..4a7c63a1b559a 100644 --- a/server/src/main/java/org/opensearch/threadpool/Scheduler.java +++ b/server/src/main/java/org/opensearch/threadpool/Scheduler.java @@ -60,7 +60,7 @@ public interface Scheduler { /** * Create a scheduler that can be used client side. Server side, please use ThreadPool.schedule instead. - * + *
<p>
                            * Notice that if any scheduled jobs fail with an exception, these will bubble up to the uncaught exception handler where they will * be logged as a warning. This includes jobs started using execute, submit and schedule. * @param settings the settings to use @@ -178,7 +178,7 @@ interface ScheduledCancellable extends Delayed, Cancellable {} * This class encapsulates the scheduling of a {@link Runnable} that needs to be repeated on a interval. For example, checking a value * for cleanup every second could be done by passing in a Runnable that can perform the check and the specified interval between * executions of this runnable. NOTE: the runnable is only rescheduled to run again after completion of the runnable. - * + *
<p>
                            * For this class, completion means that the call to {@link Runnable#run()} returned or an exception was thrown and caught. In * case of an exception, this class will log the exception and reschedule the runnable for its next execution. This differs from the * {@link ScheduledThreadPoolExecutor#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} semantics as an exception there would diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java index 183b9b2f4cf9a..90f50f78d84ad 100644 --- a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java +++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java @@ -25,7 +25,7 @@ /** * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to * entities listening to the events. - * + *
<p>
                            * It's able to associate runnable with a task with the help of task Id available in thread context. */ public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 8375ac34972af..5f10986239300 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -115,6 +115,7 @@ public static class Names { public static final String TRANSLOG_SYNC = "translog_sync"; public static final String REMOTE_PURGE = "remote_purge"; public static final String REMOTE_REFRESH_RETRY = "remote_refresh_retry"; + public static final String REMOTE_RECOVERY = "remote_recovery"; public static final String INDEX_SEARCHER = "index_searcher"; } @@ -184,6 +185,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.TRANSLOG_SYNC, ThreadPoolType.FIXED); map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); + map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); } @@ -269,6 +271,15 @@ public ThreadPool( Names.REMOTE_REFRESH_RETRY, new ScalingExecutorBuilder(Names.REMOTE_REFRESH_RETRY, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) ); + builders.put( + Names.REMOTE_RECOVERY, + new ScalingExecutorBuilder( + Names.REMOTE_RECOVERY, + 1, + twiceAllocatedProcessors(allocatedProcessors), + TimeValue.timeValueMinutes(5) + ) + ); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { builders.put( Names.INDEX_SEARCHER, @@ -314,7 +325,7 @@ public ThreadPool( /** * Returns a value of milliseconds that may be used for relative time calculations. - * + *
<p>
                            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -324,7 +335,7 @@ public long relativeTimeInMillis() { /** * Returns a value of nanoseconds that may be used for relative time calculations. - * + *
<p>
                            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -337,7 +348,7 @@ public long relativeTimeInNanos() { * that require the highest precision possible. Performance critical code must use * either {@link #relativeTimeInNanos()} or {@link #relativeTimeInMillis()} which * give better performance at the cost of lower precision. - * + *
<p>
                            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -347,7 +358,7 @@ public long preciseRelativeTimeInNanos() { /** * Returns the value of milliseconds since UNIX epoch. - * + *
<p>
                            * This method should only be used for exact date/time formatting. For calculating * time deltas that should not suffer from negative deltas, which are possible with * this method, see {@link #relativeTimeInMillis()}. @@ -643,7 +654,7 @@ public String toString() { /** * A thread to cache millisecond time values from * {@link System#nanoTime()} and {@link System#currentTimeMillis()}. - * + *
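A small contrast (not part of the diff) between the relative and absolute clocks documented above; the ThreadPool instance is assumed to be injected.

    import org.opensearch.threadpool.ThreadPool;

    class TimeSourceSketch {
        // Relative time is only meaningful for deltas.
        static long elapsedMillis(ThreadPool threadPool, Runnable work) {
            long start = threadPool.relativeTimeInMillis();
            work.run();
            return threadPool.relativeTimeInMillis() - start;
        }

        // Absolute time is epoch-based and intended for date/time formatting, not for deltas.
        static long nowEpochMillis(ThreadPool threadPool) {
            return threadPool.absoluteTimeInMillis();
        }
    }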
<p>
                            * The values are updated at a specified interval. */ static class CachedTimeThread extends Thread { diff --git a/server/src/main/java/org/opensearch/transport/BaseTcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/BaseTcpTransportChannel.java new file mode 100644 index 0000000000000..14e065d3350c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/BaseTcpTransportChannel.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +/** + * Base class TcpTransportChannel + */ +public abstract class BaseTcpTransportChannel implements TransportChannel { + private final TcpChannel channel; + + /** + * Constructor. + * @param channel tcp channel + */ + public BaseTcpTransportChannel(TcpChannel channel) { + this.channel = channel; + } + + /** + * Returns {@link TcpChannel} + * @return TcpChannel + */ + public TcpChannel getChannel() { + return channel; + } + +} diff --git a/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java b/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java index 5cb169439a14d..57707d3b44477 100644 --- a/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java +++ b/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java @@ -48,11 +48,11 @@ * requires that the underlying {@link DeflaterOutputStream} be closed to write EOS bytes. However, the * {@link BytesStream} should not be closed yet, as we have not used the bytes. This class handles these * intricacies. - * + *
<p>
                            * {@link CompressibleBytesOutputStream#materializeBytes()} should be called when all the bytes have been * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. * The underlying {@link BytesReference} will be returned. - * + *
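A usage sketch (not part of the diff) of the write/materialize/close protocol this javadoc describes; the constructor shape (a BytesStream plus a compress flag) and the same-package placement are assumptions, since the class is internal to org.opensearch.transport.

    package org.opensearch.transport;

    import java.io.IOException;

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.core.common.bytes.BytesReference;

    class CompressibleBytesSketch {
        static BytesReference serialize(boolean compress) throws IOException {
            try (BytesStreamOutput bytesStream = new BytesStreamOutput()) { // the caller closes the underlying stream
                CompressibleBytesOutputStream out = new CompressibleBytesOutputStream(bytesStream, compress);
                out.writeString("payload");
                BytesReference bytes = out.materializeBytes(); // writes compression EOS bytes when enabled
                out.close(); // does NOT close bytesStream
                return bytes;
            }
        }
    }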
<p>
                            * {@link CompressibleBytesOutputStream#close()} will NOT close the underlying stream. The byte stream passed * in the constructor must be closed individually. * diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index 9f9232c18079a..c14a53e799319 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -46,6 +46,11 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; import java.io.EOFException; @@ -74,6 +79,8 @@ public class InboundHandler { private volatile long slowLogThresholdMs = Long.MAX_VALUE; + private final Tracer tracer; + InboundHandler( ThreadPool threadPool, OutboundHandler outboundHandler, @@ -81,7 +88,8 @@ public class InboundHandler { TransportHandshaker handshaker, TransportKeepAlive keepAlive, Transport.RequestHandlers requestHandlers, - Transport.ResponseHandlers responseHandlers + Transport.ResponseHandlers responseHandlers, + Tracer tracer ) { this.threadPool = threadPool; this.outboundHandler = outboundHandler; @@ -90,6 +98,7 @@ public class InboundHandler { this.keepAlive = keepAlive; this.requestHandlers = requestHandlers; this.responseHandlers = responseHandlers; + this.tracer = tracer; } void setMessageListener(TransportMessageListener listener) { @@ -108,7 +117,6 @@ void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception final long startTime = threadPool.relativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); TransportLogger.logInboundMessage(channel, message); - if (message.isPing()) { keepAlive.receiveKeepAlive(channel); } else { @@ -123,7 +131,6 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st final InetSocketAddress remoteAddress = channel.getRemoteAddress(); final Header header = message.getHeader(); assert header.needsToReadVariableHeader() == false; - ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext existing = threadContext.stashContext()) { // Place the context with the headers from the message @@ -165,6 +172,7 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); } } + } } finally { final long took = threadPool.relativeTimeInMillis() - startTime; @@ -184,80 +192,89 @@ private void handleRequest(TcpChannel channel, Head final String action = header.getActionName(); final long requestId = header.getRequestId(); final Version version = header.getVersion(); - if (header.isHandshake()) { - messageListener.onRequestReceived(requestId, action); - // Cannot short circuit handshakes - assert message.isShortCircuit() == false; - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final TransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - 
version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - try { - handshaker.handleHandshake(transportChannel, requestId, stream); - } catch (Exception e) { - if (Version.CURRENT.isCompatible(header.getVersion())) { - sendErrorResponse(action, transportChannel, e); - } else { - logger.warn( - new ParameterizedMessage( - "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", - channel, - header.getVersion() - ), - e - ); - channel.close(); - } - } - } else { - final TransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - try { + Span span = tracer.startSpan(SpanBuilder.from(action, channel)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + if (header.isHandshake()) { messageListener.onRequestReceived(requestId, action); - if (message.isShortCircuit()) { - sendErrorResponse(action, transportChannel, message.getException()); - } else { - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final RequestHandlerRegistry reg = requestHandlers.getHandler(action); - assert reg != null; - - final T request = newRequest(requestId, action, stream, reg); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - checkStreamIsFullyConsumed(requestId, action, stream); - - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, transportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); - } + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + handshaker.handleHandshake(traceableTransportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, traceableTransportChannel, e); } else { - threadPool.executor(executor).execute(new RequestHandler<>(reg, request, transportChannel)); + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); } } - } catch (Exception e) { - sendErrorResponse(action, transportChannel, e); + } else { + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + 
sendErrorResponse(action, traceableTransportChannel, message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, traceableTransportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), traceableTransportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, traceableTransportChannel, e); + } } + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; } } diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java index 1599c4cb75517..8a5f6dfffb036 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java @@ -54,10 +54,10 @@ * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not * fully connected with the current node. From a connection perspective a local cluster forms a bi-directional star network while in the * remote case we only connect to a subset of the nodes in the cluster in an uni-directional fashion. - * + *
<p>
                            * This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes * in the remote cluster and connects to all eligible nodes, for details see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}. - * + *
<p>
                            * In the case of a disconnection, this class will issue a re-connect task to establish at most * {@link SniffConnectionStrategy#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of * connections per cluster has been reached. @@ -123,7 +123,7 @@ void ensureConnected(ActionListener listener) { /** * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns null if the node ID is not found. - * + *
<p>
                            * The requests to get cluster state on the connected cluster are made in the system context because logically * they are equivalent to checking a single detail in the local cluster state and should not require that the * user who made the request that is using this method in its implementation is authorized to view the entire diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index 464282730d2b2..98c182c562928 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -91,14 +91,14 @@ public void processMessageReceived(Request request, TransportChannel channel) th Releasable unregisterTask = () -> taskManager.unregister(task); try { - if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { + if (channel instanceof BaseTcpTransportChannel && task instanceof CancellableTask) { if (request instanceof ShardSearchRequest) { // on receiving request, update the inbound network time to reflect time spent in transit over the network ((ShardSearchRequest) request).setInboundNetworkTime( Math.max(0, System.currentTimeMillis() - ((ShardSearchRequest) request).getInboundNetworkTime()) ); } - final TcpChannel tcpChannel = ((TcpTransportChannel) channel).getChannel(); + final TcpChannel tcpChannel = ((BaseTcpTransportChannel) channel).getChannel(); final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(tcpChannel, (CancellableTask) task); unregisterTask = Releasables.wrap(unregisterTask, stopTracking); } diff --git a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java index 052611317f174..4dab0039ec878 100644 --- a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java @@ -37,6 +37,7 @@ import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * Transport channel for tasks @@ -89,4 +90,9 @@ public Version getVersion() { public TransportChannel getChannel() { return channel; } + + @Override + public Optional get(String name, Class clazz) { + return getChannel().get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java index eac137ec30f1a..f98b65d0a4df1 100644 --- a/server/src/main/java/org/opensearch/transport/TcpChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java @@ -38,6 +38,7 @@ import org.opensearch.core.common.bytes.BytesReference; import java.net.InetSocketAddress; +import java.util.Optional; /** * This is a tcp channel representing a single channel connection to another node. It is the base channel @@ -96,6 +97,20 @@ public interface TcpChannel extends CloseableChannel { */ ChannelStats getChannelStats(); + /** + * Returns the contextual property associated with this specific TCP channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). 
+ * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default Optional get(String name, Class clazz) { + return Optional.empty(); + } + /** * Channel statistics * diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7da7dcad13120..d0e6516973382 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -68,6 +68,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -159,7 +160,8 @@ public TcpTransport( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { this.settings = settings; this.profileSettings = getProfileSettings(settings); @@ -208,7 +210,8 @@ public TcpTransport( handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + tracer ); } diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java index 00702d08902a9..81de0af07ea7c 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java @@ -38,6 +38,7 @@ import org.opensearch.search.query.QuerySearchResult; import java.io.IOException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -46,11 +47,10 @@ * * @opensearch.internal */ -public final class TcpTransportChannel implements TransportChannel { +public final class TcpTransportChannel extends BaseTcpTransportChannel { private final AtomicBoolean released = new AtomicBoolean(); private final OutboundHandler outboundHandler; - private final TcpChannel channel; private final String action; private final long requestId; private final Version version; @@ -70,9 +70,9 @@ public final class TcpTransportChannel implements TransportChannel { boolean isHandshake, Releasable breakerRelease ) { + super(channel); this.version = version; this.features = features; - this.channel = channel; this.outboundHandler = outboundHandler; this.action = action; this.requestId = requestId; @@ -83,7 +83,7 @@ public final class TcpTransportChannel implements TransportChannel { @Override public String getProfileName() { - return channel.getProfile(); + return getChannel().getProfile(); } @Override @@ -93,7 +93,7 @@ public void sendResponse(TransportResponse response) throws IOException { // update outbound network time with current time before sending response over network ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); } - outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, isHandshake); + outboundHandler.sendResponse(version, features, getChannel(), requestId, action, response, compressResponse, isHandshake); } finally { release(false); } @@ -102,7 +102,7 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(Exception exception) throws IOException { try 
{ - outboundHandler.sendErrorResponse(version, features, channel, requestId, action, exception); + outboundHandler.sendErrorResponse(version, features, getChannel(), requestId, action, exception); } finally { release(true); } @@ -131,7 +131,8 @@ public Version getVersion() { return version; } - public TcpChannel getChannel() { - return channel; + @Override + public Optional get(String name, Class clazz) { + return getChannel().get(name, clazz); } } diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index 3c582127f28e8..7423d59103302 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -39,6 +39,7 @@ import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * A transport channel allows to send a response to a request on the channel. @@ -78,4 +79,18 @@ static void sendErrorResponse(TransportChannel channel, String actionName, Trans ); } } + + /** + * Returns the contextual property associated with this specific transport channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property. + */ + default Optional get(String name, Class clazz) { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 90e94e52515ce..8992af18edb48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -57,7 +57,7 @@ public interface TransportResponseHandler extends W * It should be used to clear up the resources held by the {@link TransportResponseHandler}. 
* @param exp exception */ - default void handleRejection(Exception exp) {}; + default void handleRejection(Exception exp) {} default TransportResponseHandler wrap(Function converter, Writeable.Reader reader) { final TransportResponseHandler self = this; diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 32bedb52a9cef..de88c3619abe8 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -170,12 +170,12 @@ public void close() {} }; static { - /** - * Registers server specific types as a streamables for serialization - * over the {@link StreamOutput} and {@link StreamInput} wire + /* + Registers server specific types as a streamables for serialization + over the {@link StreamOutput} and {@link StreamInput} wire */ Streamables.registerStreamables(); - /** Registers OpenSearch server specific exceptions (exceptions outside of core library) */ + /* Registers OpenSearch server specific exceptions (exceptions outside of core library) */ OpenSearchServerException.registerExceptions(); } diff --git a/server/src/main/java/org/opensearch/watcher/FileWatcher.java b/server/src/main/java/org/opensearch/watcher/FileWatcher.java index 82f95d6a1622c..d773e3b5d7c9e 100644 --- a/server/src/main/java/org/opensearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/opensearch/watcher/FileWatcher.java @@ -44,7 +44,7 @@ /** * File resources watcher - * + *
<p> * The file watcher checks directory and all its subdirectories for file changes and notifies its listeners accordingly * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java index 9b9c00cd4252f..62002f9c6c323 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java @@ -49,7 +49,7 @@ /** * Generic resource watcher service - * + *
<p> * Other opensearch services can register their resource watchers with this service using {@link #add(ResourceWatcher)} * method. This service will call {@link org.opensearch.watcher.ResourceWatcher#checkAndNotify()} method of all * registered watcher periodically. The frequency of checks can be specified using {@code resource.reload.interval} setting, which diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index 7546a2d7c898e..d83539a2fef61 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -976,7 +976,7 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { /** * Builds a {@link ToXContent} using a JSON XContentBuilder and compares the result to the given json in string format. - * + *
<p>
                            * By default, the stack trace of the exception is not rendered. The parameter `errorTrace` forces the stack trace to * be rendered like the REST API does when the "error_trace" parameter is set to true. */ diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index e3f16463a5328..3050d1674a95b 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -36,10 +36,12 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchRequestStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterManagerThrottlingStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.metrics.OperationStats; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +49,10 @@ import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; import org.opensearch.core.indices.breaker.CircuitBreakerStats; import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.http.HttpStats; import org.opensearch.index.ReplicationStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.RemoteTranslogStats; @@ -59,6 +63,8 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.node.ResponseCollectorService; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; @@ -69,6 +75,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -282,6 +289,10 @@ public void testSerialization() throws IOException { assertEquals(ioStats.getTotalReadOperations(), deserializedIoStats.getTotalReadOperations()); assertEquals(ioStats.getTotalWriteKilobytes(), deserializedIoStats.getTotalWriteKilobytes()); assertEquals(ioStats.getTotalWriteOperations(), deserializedIoStats.getTotalWriteOperations()); + assertEquals(ioStats.getTotalReadTime(), deserializedIoStats.getTotalReadTime()); + assertEquals(ioStats.getTotalWriteTime(), deserializedIoStats.getTotalWriteTime()); + assertEquals(ioStats.getTotalQueueSize(), deserializedIoStats.getTotalQueueSize()); + assertEquals(ioStats.getTotalIOTimeMillis(), deserializedIoStats.getTotalIOTimeMillis()); assertEquals(ioStats.getDevicesStats().length, deserializedIoStats.getDevicesStats().length); for (int i = 0; i < ioStats.getDevicesStats().length; i++) { FsInfo.DeviceStats deviceStats = ioStats.getDevicesStats()[i]; @@ -342,6 +353,25 @@ public void 
testSerialization() throws IOException { assertEquals(queueStats.getTotal(), deserializedDiscoveryStats.getQueueStats().getTotal()); assertEquals(queueStats.getPending(), deserializedDiscoveryStats.getQueueStats().getPending()); } + ClusterStateStats stateStats = discoveryStats.getClusterStateStats(); + if (stateStats == null) { + assertNull(deserializedDiscoveryStats.getClusterStateStats()); + } else { + assertEquals(stateStats.getUpdateFailed(), deserializedDiscoveryStats.getClusterStateStats().getUpdateFailed()); + assertEquals(stateStats.getUpdateSuccess(), deserializedDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + assertEquals( + stateStats.getUpdateTotalTimeInMillis(), + deserializedDiscoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() + ); + assertEquals(1, deserializedDiscoveryStats.getClusterStateStats().getPersistenceStats().size()); + PersistedStateStats deserializedRemoteStateStats = deserializedDiscoveryStats.getClusterStateStats() + .getPersistenceStats() + .get(0); + PersistedStateStats remoteStateStats = stateStats.getPersistenceStats().get(0); + assertEquals(remoteStateStats.getFailedCount(), deserializedRemoteStateStats.getFailedCount()); + assertEquals(remoteStateStats.getSuccessCount(), deserializedRemoteStateStats.getSuccessCount()); + assertEquals(remoteStateStats.getTotalTimeInMillis(), deserializedRemoteStateStats.getTotalTimeInMillis()); + } } IngestStats ingestStats = nodeStats.getIngestStats(); IngestStats deserializedIngestStats = deserializedNodeStats.getIngestStats(); @@ -393,6 +423,35 @@ public void testSerialization() throws IOException { assertEquals(aStats.responseTime, bStats.responseTime, 0.01); }); } + NodesResourceUsageStats resourceUsageStats = nodeStats.getResourceUsageStats(); + NodesResourceUsageStats deserializedResourceUsageStats = deserializedNodeStats.getResourceUsageStats(); + if (resourceUsageStats == null) { + assertNull(deserializedResourceUsageStats); + } else { + resourceUsageStats.getNodeIdToResourceUsageStatsMap().forEach((k, v) -> { + NodeResourceUsageStats aResourceUsageStats = resourceUsageStats.getNodeIdToResourceUsageStatsMap().get(k); + NodeResourceUsageStats bResourceUsageStats = deserializedResourceUsageStats.getNodeIdToResourceUsageStatsMap() + .get(k); + assertEquals( + aResourceUsageStats.getMemoryUtilizationPercent(), + bResourceUsageStats.getMemoryUtilizationPercent(), + 0.0 + ); + assertEquals(aResourceUsageStats.getCpuUtilizationPercent(), bResourceUsageStats.getCpuUtilizationPercent(), 0.0); + assertEquals(aResourceUsageStats.getTimestamp(), bResourceUsageStats.getTimestamp()); + }); + } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = nodeStats.getSegmentReplicationRejectionStats(); + SegmentReplicationRejectionStats deserializedSegmentReplicationRejectionStats = deserializedNodeStats + .getSegmentReplicationRejectionStats(); + if (segmentReplicationRejectionStats == null) { + assertNull(deserializedSegmentReplicationRejectionStats); + } else { + assertEquals( + segmentReplicationRejectionStats.getTotalRejectionCount(), + deserializedSegmentReplicationRejectionStats.getTotalRejectionCount() + ); + } ScriptCacheStats scriptCacheStats = nodeStats.getScriptCacheStats(); ScriptCacheStats deserializedScriptCacheStats = deserializedNodeStats.getScriptCacheStats(); if (scriptCacheStats == null) { @@ -625,6 +684,10 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + 
randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), null ); deviceStatsArray[i] = new FsInfo.DeviceStats( @@ -635,6 +698,10 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), previousDeviceStats ); } @@ -681,12 +748,16 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { ScriptStats scriptStats = frequently() ? new ScriptStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) : null; + ClusterStateStats stateStats = new ClusterStateStats(); + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + stateStats.setPersistenceStats(Arrays.asList(remoteStateStats)); DiscoveryStats discoveryStats = frequently() ? new DiscoveryStats( randomBoolean() ? new PendingClusterStateStats(randomInt(), randomInt(), randomInt()) : null, randomBoolean() ? new PublishClusterStateStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) - : null + : null, + randomBoolean() ? stateStats : null ) : null; IngestStats ingestStats = null; @@ -756,6 +827,35 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { } adaptiveSelectionStats = new AdaptiveSelectionStats(nodeConnections, nodeStats); } + NodesResourceUsageStats nodesResourceUsageStats = null; + if (frequently()) { + int numNodes = randomIntBetween(0, 10); + Map nodeConnections = new HashMap<>(); + Map resourceUsageStatsMap = new HashMap<>(); + for (int i = 0; i < numNodes; i++) { + String nodeId = randomAlphaOfLengthBetween(3, 10); + // add outgoing connection info + if (frequently()) { + nodeConnections.put(nodeId, randomLongBetween(0, 100)); + } + // add node calculations + if (frequently()) { + NodeResourceUsageStats stats = new NodeResourceUsageStats( + nodeId, + System.currentTimeMillis(), + randomDoubleBetween(1.0, 100.0, true), + randomDoubleBetween(1.0, 100.0, true) + ); + resourceUsageStatsMap.put(nodeId, stats); + } + } + nodesResourceUsageStats = new NodesResourceUsageStats(resourceUsageStatsMap); + } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = null; + if (frequently()) { + segmentReplicationRejectionStats = new SegmentReplicationRejectionStats(randomNonNegativeLong()); + } + ClusterManagerThrottlingStats clusterManagerThrottlingStats = null; if (frequently()) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); @@ -787,6 +887,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { discoveryStats, ingestStats, adaptiveSelectionStats, + nodesResourceUsageStats, scriptCacheStats, null, null, @@ -795,6 +896,8 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { weightedRoutingStats, null, null, + null, + segmentReplicationRejectionStats, null ); } @@ -815,6 +918,7 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { remoteSegmentStats.setMaxRefreshTimeLag(2L); remoteSegmentStats.addTotalUploadTime(20L); remoteSegmentStats.addTotalDownloadTime(20L); + remoteSegmentStats.addTotalRejections(5L); RemoteTranslogStats remoteTranslogStats = indicesStats.getTranslog().getRemoteTranslogStats(); RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(getRandomRemoteTranslogTransferTrackerStats()); diff --git 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 07e149dd72164..a3fa0f9cb16e4 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -211,7 +211,8 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, diff --git a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java index 67846efab2af8..d35c821b41aa0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; @@ -56,7 +56,7 @@ protected Collection> getPlugins() { public void setupIndex() { Settings settings = Settings.builder() // don't allow any merges so that the num docs is the expected segments - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); createIndex("test", settings); diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 0f67eff26cbde..cf7080ab2fc06 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -54,6 +54,7 @@ import org.opensearch.index.VersionType; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -155,7 +156,8 @@ private void indicesThatCannotBeCreatedTestCase( new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), null, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) { @Override void executeBulk( diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 515f6eae28a34..141c630b94020 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -70,6 +70,7 @@ import 
org.opensearch.indices.SystemIndices; import org.opensearch.ingest.IngestService; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -172,7 +173,8 @@ class TestTransportBulkAction extends TransportBulkAction { new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), null, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 10cad6fb147a2..6bbd740df7f9c 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -118,7 +118,8 @@ class TestTransportBulkAction extends TransportBulkAction { new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(), new SystemIndices(emptyMap())), new IndexingPressureService(Settings.EMPTY, clusterService), mock(IndicesService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java index 852e3837e1e7a..9d5b4430ea395 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java @@ -282,7 +282,8 @@ static class TestTransportBulkAction extends TransportBulkAction { new IndexingPressureService(Settings.EMPTY, clusterService), null, new SystemIndices(emptyMap()), - relativeTimeProvider + relativeTimeProvider, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index fe0fdd07025d9..b325cfa197933 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -88,6 +88,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; @@ -1074,7 +1075,8 @@ public void testHandlePrimaryTermValidationRequestWithDifferentAllocationId() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId + "-1", 1, shardId), @@ -1105,7 +1107,8 @@ public void testHandlePrimaryTermValidationRequestWithOlderPrimaryTerm() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); 
action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), @@ -1136,7 +1139,8 @@ public void testHandlePrimaryTermValidationRequestSuccess() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), @@ -1178,7 +1182,8 @@ private TransportShardBulkAction createAction() { mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), mock(RemoteStorePressureService.class), - mock(SystemIndices.class) + mock(SystemIndices.class), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java index 705fb546a2fed..41f782e308785 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -85,7 +85,7 @@ public void init() throws IOException { when(ingestService.getProcessorFactories()).thenReturn(registry); } - public void testParseUsingPipelineStore(boolean useExplicitType) throws Exception { + public void testParseUsingPipelineStore() throws Exception { int numDocs = randomIntBetween(1, 10); Map requestContent = new HashMap<>(); @@ -131,7 +131,7 @@ public void testParseUsingPipelineStore(boolean useExplicitType) throws Exceptio assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(1)); } - public void innerTestParseWithProvidedPipeline() throws Exception { + public void testParseWithProvidedPipeline() throws Exception { int numDocs = randomIntBetween(1, 10); Map requestContent = new HashMap<>(); @@ -144,17 +144,29 @@ public void innerTestParseWithProvidedPipeline() throws Exception { List fields = Arrays.asList(INDEX, ID, ROUTING, VERSION, VERSION_TYPE, IF_SEQ_NO, IF_PRIMARY_TERM); for (IngestDocument.Metadata field : fields) { if (field == VERSION) { - Long value = randomLong(); - doc.put(field.getFieldName(), value); - expectedDoc.put(field.getFieldName(), value); + if (randomBoolean()) { + Long value = randomLong(); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), value); + } else { + int value = randomIntBetween(1, 1000000); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), (long) value); + } } else if (field == VERSION_TYPE) { String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)); doc.put(field.getFieldName(), value); expectedDoc.put(field.getFieldName(), value); } else if (field == IF_SEQ_NO || field == IF_PRIMARY_TERM) { - Long value = randomNonNegativeLong(); - doc.put(field.getFieldName(), value); - expectedDoc.put(field.getFieldName(), value); + if (randomBoolean()) { + Long value = randomNonNegativeLong(); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), value); + } else { + int value = randomIntBetween(1, 1000000); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), (long) value); + } } else { if (randomBoolean()) { String value = randomAlphaOfLengthBetween(1, 10); @@ -282,4 +294,40 @@ public void testNotValidDocs() { ); 
assertThat(e3.getMessage(), containsString("required property is missing")); } + + public void testNotValidMetadataFields() { + List fields = Arrays.asList(VERSION, IF_SEQ_NO, IF_PRIMARY_TERM); + for (IngestDocument.Metadata field : fields) { + String metadataFieldName = field.getFieldName(); + Map requestContent = new HashMap<>(); + List> docs = new ArrayList<>(); + requestContent.put(Fields.DOCS, docs); + Map doc = new HashMap<>(); + doc.put(metadataFieldName, randomAlphaOfLengthBetween(1, 10)); + doc.put(Fields.SOURCE, Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); + docs.add(doc); + + Map pipelineConfig = new HashMap<>(); + List> processors = new ArrayList<>(); + Map processorConfig = new HashMap<>(); + List> onFailureProcessors = new ArrayList<>(); + int numOnFailureProcessors = randomIntBetween(0, 1); + for (int j = 0; j < numOnFailureProcessors; j++) { + onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap())); + } + if (numOnFailureProcessors > 0) { + processorConfig.put("on_failure", onFailureProcessors); + } + processors.add(Collections.singletonMap("mock_processor", processorConfig)); + pipelineConfig.put("processors", processors); + + requestContent.put(Fields.PIPELINE, pipelineConfig); + + assertThrows( + "Failed to parse parameter [" + metadataFieldName + "], only int or long is accepted", + IllegalArgumentException.class, + () -> SimulatePipelineRequest.parse(requestContent, false, ingestService) + ); + } + } } diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index fbac465f946f4..da87a0a967f53 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -135,7 +135,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { new NetworkService(emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) ) { @@ -202,7 +203,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { shardStateAction, new ActionFilters(new HashSet<>()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertThat(action.globalBlockLevel(), nullValue()); @@ -255,7 +257,8 @@ private TransportResyncReplicationAction createAction() { mock(ShardStateAction.class), new ActionFilters(new HashSet<>()), mock(IndexingPressureService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } } diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index f628bb3201452..edac50813e191 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -688,7 +688,11 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct ); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse 
should not be called"), exception::set); - + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); return new SearchDfsQueryThenFetchAsyncAction( logger, null, @@ -702,7 +706,7 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct searchRequest, listener, shardsIter, - null, + timeProvider, null, task, SearchResponse.Clusters.EMPTY, @@ -734,6 +738,11 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( ); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); return new SearchQueryThenFetchAsyncAction( logger, null, @@ -747,7 +756,7 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( searchRequest, listener, shardsIter, - null, + timeProvider, null, task, SearchResponse.Clusters.EMPTY, diff --git a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java index 3efcadbfb320d..8042a7e296869 100644 --- a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java +++ b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java @@ -46,7 +46,6 @@ import java.time.ZoneId; import java.util.Arrays; -import static org.opensearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; @@ -136,7 +135,11 @@ public void testWithDates() { for (boolean reverse : new boolean[] { true, false }) { SortField[] sortFields = new SortField[] { new SortField("foo", SortField.Type.LONG, reverse) }; DocValueFormat[] sortFormats = new DocValueFormat[] { - new DocValueFormat.DateTime(DEFAULT_DATE_TIME_FORMATTER, ZoneId.of("UTC"), DateFieldMapper.Resolution.MILLISECONDS) }; + new DocValueFormat.DateTime( + DateFieldMapper.getDefaultDateTimeFormatter(), + ZoneId.of("UTC"), + DateFieldMapper.Resolution.MILLISECONDS + ) }; BottomSortValuesCollector collector = new BottomSortValuesCollector(3, sortFields); collector.consumeTopDocs( createTopDocs(sortFields[0], 100, newDateArray("2017-06-01T12:18:20Z", "2018-04-03T15:10:27Z", "2013-06-01T13:10:20Z")), @@ -170,7 +173,11 @@ public void testWithDateNanos() { for (boolean reverse : new boolean[] { true, false }) { SortField[] sortFields = new SortField[] { new SortField("foo", SortField.Type.LONG, reverse) }; DocValueFormat[] sortFormats = new DocValueFormat[] { - new DocValueFormat.DateTime(DEFAULT_DATE_TIME_FORMATTER, ZoneId.of("UTC"), DateFieldMapper.Resolution.NANOSECONDS) }; + new DocValueFormat.DateTime( + DateFieldMapper.getDefaultDateTimeFormatter(), + ZoneId.of("UTC"), + DateFieldMapper.Resolution.NANOSECONDS + ) }; BottomSortValuesCollector collector = new BottomSortValuesCollector(3, sortFields); collector.consumeTopDocs( createTopDocs(sortFields[0], 100, newDateNanoArray("2017-06-01T12:18:20Z", "2018-04-03T15:10:27Z", "2013-06-01T13:10:20Z")), @@ -242,7 +249,7 @@ private Object[] newBytesArray(String... values) { private Object[] newDateArray(String... 
values) { Long[] longs = new Long[values.length]; for (int i = 0; i < values.length; i++) { - longs[i] = DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i]); + longs[i] = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i]); } return longs; } @@ -250,7 +257,7 @@ private Object[] newDateArray(String... values) { private Object[] newDateNanoArray(String... values) { Long[] longs = new Long[values.length]; for (int i = 0; i < values.length; i++) { - longs[i] = DateUtils.toNanoSeconds(DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i])); + longs[i] = DateUtils.toNanoSeconds(DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i])); } return longs; } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 2643aa5b6db01..6cbe458a35ef8 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -339,8 +339,8 @@ public void onFailure(Exception e) { createListener.onFailure(new Exception("Exception occurred in phase 1")); latch.await(); assertEquals(0, updateNodesInvoked.size()); - /** - * cleanup is not called on create pit phase one failure + /* + cleanup is not called on create pit phase one failure */ assertEquals(0, deleteNodesInvoked.size()); } @@ -438,8 +438,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } @@ -526,8 +526,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java new file mode 100644 index 0000000000000..a2e301143d694 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.BoostingQueryBuilder; +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.ScoreSortBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Arrays; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +public final class SearchQueryCategorizerTests extends OpenSearchTestCase { + + private MetricsRegistry metricsRegistry; + + private SearchQueryCategorizer searchQueryCategorizer; + + @Before + public void setup() { + metricsRegistry = mock(MetricsRegistry.class); + when(metricsRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenAnswer( + invocation -> mock(Counter.class) + ); + searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } + + public void testAggregationsQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.aggregation( + new MultiTermsAggregationBuilder("agg1").terms( + Arrays.asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName("username").build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName("rating").build() + ) + ) + ); + sourceBuilder.size(0); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + } + + public void testBoolQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new BoolQueryBuilder().must(new MatchQueryBuilder("searchText", "fox"))); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testFunctionScoreQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))); + + searchQueryCategorizer.categorize(sourceBuilder); + + 
Mockito.verify(searchQueryCategorizer.searchQueryCounters.functionScoreCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMatchQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "php")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMatchPhraseQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchPhraseQuery("tags", "php")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchPhrasePrefixCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMultiMatchQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new MultiMatchQueryBuilder("foo bar", "myField")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.multiMatchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testOtherQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + BoostingQueryBuilder queryBuilder = new BoostingQueryBuilder( + new TermQueryBuilder("unmapped_field", "foo"), + new MatchNoneQueryBuilder() + ); + sourceBuilder.query(queryBuilder); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.otherQueryCounter, times(2)).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testQueryStringQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*"); + sourceBuilder.query(queryBuilder); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.queryStringQueryCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testRangeQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); + rangeQuery.gte("1970-01-01"); + rangeQuery.lt("1982-01-01"); + sourceBuilder.query(rangeQuery); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.rangeCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testRegexQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(new RegexpQueryBuilder("field", "text")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testSortQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); + sourceBuilder.sort("creationDate", SortOrder.DESC); + sourceBuilder.sort(new ScoreSortBuilder()); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class)); + } + 
+ public void testTermQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.termQuery("field", "value2")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testWildcardQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new WildcardQueryBuilder("field", "text")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.wildcardCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testComplexQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + + TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("field", "value2"); + MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("tags", "php"); + RegexpQueryBuilder regexpQueryBuilder = new RegexpQueryBuilder("field", "text"); + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().must(termQueryBuilder) + .filter(matchQueryBuilder) + .should(regexpQueryBuilder); + sourceBuilder.query(boolQueryBuilder); + sourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 25d8c5551880f..f025e3a63b9bf 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -244,6 +244,9 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); mutators.add(() -> mutation.setCcsMinimizeRoundtrips(searchRequest.isCcsMinimizeRoundtrips() == false)); + mutators.add( + () -> mutation.setPhaseTook(searchRequest.isPhaseTook() == null ? 
randomBoolean() : searchRequest.isPhaseTook() == false) + ); mutators.add( () -> mutation.setCancelAfterTimeInterval( searchRequest.getCancelAfterTimeInterval() != null diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index 097e922147698..c9e59ab4ea04d 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -74,7 +74,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import static java.util.Collections.singletonMap; @@ -152,6 +154,11 @@ public SearchResponse createTestItem( Boolean terminatedEarly = randomBoolean() ? null : randomBoolean(); int numReducePhases = randomIntBetween(1, 10); long tookInMillis = randomNonNegativeLong(); + Map phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), randomNonNegativeLong()); + } + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); @@ -182,6 +189,7 @@ public SearchResponse createTestItem( successfulShards, skippedShards, tookInMillis, + phaseTook, shardSearchFailures, randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY, null @@ -353,6 +361,14 @@ public void testToXContent() { assertEquals(1, searchExtBuilders.size()); } { + Map phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), 0L); + } + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 25L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 30L); + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); SearchResponse response = new SearchResponse( new InternalSearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), @@ -368,13 +384,24 @@ public void testToXContent() { 0, 0, 0, + phaseTook, ShardSearchFailure.EMPTY_ARRAY, - new SearchResponse.Clusters(5, 3, 2) + new SearchResponse.Clusters(5, 3, 2), + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); { expectedString.append("\"took\":0,"); + expectedString.append("\"phase_took\":"); + { + expectedString.append("{\"dfs_pre_query\":0,"); + expectedString.append("\"query\":50,"); + expectedString.append("\"fetch\":25,"); + expectedString.append("\"dfs_query\":0,"); + expectedString.append("\"expand\":30,"); + expectedString.append("\"can_match\":0},"); + } expectedString.append("\"timed_out\":false,"); expectedString.append("\"_shards\":"); { @@ -477,6 +504,24 @@ public void testToXContentEmptyClusters() throws IOException { assertEquals(0, builder.toString().length()); } + public void testSearchResponsePhaseTookEquals() throws IOException { + SearchResponse.PhaseTook phaseTookA = new SearchResponse.PhaseTook(Map.of("foo", 0L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookB = new SearchResponse.PhaseTook(Map.of("foo", 1L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookC = new SearchResponse.PhaseTook(Map.of("foo", 0L)); + 
SearchResponse.PhaseTook phaseTookD = new SearchResponse.PhaseTook(Map.of()); + + assertNotEquals(phaseTookA, phaseTookB); + assertNotEquals(phaseTookB, phaseTookA); + assertNotEquals(phaseTookA, phaseTookC); + assertNotEquals(phaseTookC, phaseTookA); + assertNotEquals(phaseTookA, phaseTookD); + assertNotEquals(phaseTookD, phaseTookA); + assertEquals(phaseTookA, phaseTookA); + assertEquals(phaseTookB, phaseTookB); + assertEquals(phaseTookC, phaseTookC); + assertEquals(phaseTookD, phaseTookD); + } + static class DummySearchExtBuilder extends SearchExtBuilder { static ParseField DUMMY_FIELD = new ParseField("dummy"); diff --git a/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java new file mode 100644 index 0000000000000..f0f1a43e6c21e --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchTimeProviderTests extends OpenSearchTestCase { + + public void testSearchTimeProviderPhaseFailure() { + TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testTimeProvider.onPhaseStart(ctx); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + testTimeProvider.onPhaseFailure(ctx); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + } + } + + public void testSearchTimeProviderPhaseEnd() { + TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 100); + testTimeProvider.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + testTimeProvider.onPhaseEnd(ctx); + assertThat(testTimeProvider.getPhaseTookTime(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java index 77c9c64ad6611..19a9918fa4561 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java @@ -116,7 +116,8 @@ public void setUp() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - circuitBreakerService + circuitBreakerService, + NoopTracer.INSTANCE ); clusterService = createClusterService(threadPool); transportService = new TransportService( diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 0bee99f4d5656..dad0fa0efd3ec 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -153,7 +153,7 @@ public class TransportReplicationActionTests extends OpenSearchTestCase { /** * takes a request that was sent by a {@link TransportReplicationAction} and captured * and returns the underlying request if it's wrapped or the original (cast to the expected type). - * + *
<p> * This will throw a {@link ClassCastException} if the request is of the wrong type. */ public static R resolveRequest(TransportRequest requestOrWrappedRequest) { @@ -1318,7 +1318,8 @@ public void testRetryOnReplicaWithRealTransport() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( Settings.EMPTY, diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 839ac7ab5bb92..cce8758ef1014 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -114,7 +114,7 @@ * This test tests the concurrent execution of several transport replication actions. All of these actions (except one) acquire a single * permit during their execution on shards and are expected to fail if a global level or index level block is present in the cluster state. * These actions are all started at the same time, but some are delayed until one last action. - * + *
<p>
                            * This last action is special because it acquires all the permits on shards, adds the block to the cluster state and then "releases" the * previously delayed single permit actions. This way, there is a clear transition between the single permit actions executed before the * all permit action that sets the block and those executed afterwards that are doomed to fail because of the block. diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java index 4a2185d1558f7..7212b1f5efe13 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java @@ -392,7 +392,8 @@ protected TestAction( ignore -> ThreadPool.Names.SAME, false, TransportWriteActionForIndexingPressureTests.this.indexingPressureService, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 9d2069ac16190..b4549f82230bf 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -477,7 +477,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF ignore -> ThreadPool.Names.SAME, false, new IndexingPressureService(Settings.EMPTY, TransportWriteActionTests.this.clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; @@ -505,7 +506,8 @@ protected TestAction( ignore -> ThreadPool.Names.SAME, false, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); this.withDocumentFailureOnPrimary = false; this.withDocumentFailureOnReplica = false; diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 8ba965b3df1ab..f037b75dc16a3 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -190,6 +190,9 @@ public void testFillDiskUsage() { null, null, null, + null, + null, + null, null ), new NodeStats( @@ -216,6 +219,9 @@ public void testFillDiskUsage() { null, null, null, + null, + null, + null, null ), new NodeStats( @@ -242,6 +248,9 @@ public void testFillDiskUsage() { null, null, null, + null, + null, + null, null ) ); @@ -299,6 +308,9 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, + null, + null, null ), new NodeStats( @@ -325,6 +337,9 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, + null, + null, null ), new NodeStats( @@ -351,6 +366,9 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, + null, + null, null ) ); diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index f37823d2c0c7d..1c0dc7fc1ca2d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -938,6 +938,8 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep Version.CURRENT, randomAlphaOfLength(10), false, + 1, + randomAlphaOfLength(10), Collections.emptyList(), randomAlphaOfLength(10), true diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index e40826915c848..cace66d8c6d9e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -139,6 +139,7 @@ import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -1177,6 +1178,8 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), true) + .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getSettings()).thenReturn(settings); @@ -1200,8 +1203,12 @@ public void testvalidateIndexSettings() { ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); - assertThat(validationErrors.size(), is(1)); - assertThat(validationErrors.get(0), is("expected total copies needs to be a multiple of total awareness attributes [3]")); + assertThat(validationErrors.size(), is(2)); + assertThat( + validationErrors.get(0), + is("index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true]") + ); + assertThat(validationErrors.get(1), is("expected total copies needs to be a multiple of total awareness attributes [3]")); settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -1209,8 +1216,13 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") 
.put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_NUMBER_OF_REPLICAS, 2) + .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), false) + .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); assertThat(validationErrors.size(), is(0)); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java index 40eefa6cdbf03..618fcb923bc60 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java @@ -627,6 +627,39 @@ public void testGlobalStateEqualsCoordinationMetadata() { assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); } + public void testGlobalResourcesStateEqualsCoordinationMetadata() { + CoordinationMetadata coordinationMetadata1 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata1 = Metadata.builder() + .coordinationMetadata(coordinationMetadata1) + .clusterUUID(randomAlphaOfLength(10)) + .clusterUUIDCommitted(false) + .hashesOfConsistentSettings(Map.of("a", "b")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + CoordinationMetadata coordinationMetadata2 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata2 = Metadata.builder() + .coordinationMetadata(coordinationMetadata2) + .clusterUUIDCommitted(true) + .clusterUUID(randomAlphaOfLength(11)) + .hashesOfConsistentSettings(Map.of("b", "a")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + + assertTrue(Metadata.isGlobalStateEquals(metadata1, metadata1)); + assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); + assertTrue(Metadata.isGlobalResourcesMetadataEquals(metadata1, metadata2)); + } + public void testSerializationWithIndexGraveyard() throws IOException { final IndexGraveyard graveyard = IndexGraveyardTests.createRandom(); final Metadata originalMeta = Metadata.builder().indexGraveyard(graveyard).build(); diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 8b61e8f6d724d..c8a6fc76ce820 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -38,13 +38,16 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Locale; +import java.util.Map; import java.util.Set; import 
java.util.stream.Collectors; @@ -81,6 +84,22 @@ public void testRolesAreSorted() { } + public void testRemoteStoreRedactionInToString() { + final Set roles = new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES)); + Map attributes = new HashMap<>(); + attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + final DiscoveryNode node = new DiscoveryNode( + "name", + "id", + new TransportAddress(TransportAddress.META_ADDRESS, 9200), + attributes, + roles, + Version.CURRENT + ); + assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + } + public void testDiscoveryNodeIsCreatedWithHostFromInetAddress() throws Exception { InetAddress inetAddress = randomBoolean() ? InetAddress.getByName("192.0.2.1") diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java index f9f181402da1b..8cd664c8c13fc 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -50,7 +50,7 @@ public class AllocationPriorityTests extends OpenSearchAllocationTestCase { /** * Tests that higher prioritized primaries and replicas are allocated first even on the balanced shard allocator - * See https://github.com/elastic/elasticsearch/issues/13249 for details + * See elasticsearch issue #13249 for details */ public void testPrioritizedIndicesAllocatedFirst() { AllocationService allocation = createAllocationService( diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index ba1e2d27a3bc5..62dce9c4edeb5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -247,13 +247,13 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() { /** * This test verifies the allocation logic when nodes breach multiple constraints and ensure node breaching min * constraints chosen for allocation. - * + *
<p>
                            * This test mimics a cluster state containing four nodes, where one node breaches two constraints while one breaches * only one. In order to have nodes breach constraints, test excludes two nodes (node2, node3) from allocation so * that other two nodes (node0, node1) have all shards assignments resulting in constraints breach. Test asserts that * the new primary shard assignment lands on the node breaching one constraint(node1), while replica land on the other * (node0). Final shard allocation state. - * + *
<p>
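As a simplified stand-alone sketch of the idea above (the node breaching the fewest constraints is chosen), allocation can be modelled as picking the minimum-breach node; names and counts here are hypothetical, not the balanced shards allocator's real weighting.

    import java.util.Map;

    public class MinConstraintBreachSketch {
        // Toy stand-in for the allocator's ranking: fewest breached constraints wins.
        static String pickTarget(Map<String, Integer> breachedConstraintsPerNode) {
            return breachedConstraintsPerNode.entrySet().stream()
                .min(Map.Entry.comparingByValue())
                .map(Map.Entry::getKey)
                .orElseThrow(IllegalArgumentException::new);
        }

        public static void main(String[] args) {
            // node2/node3 are excluded from allocation, leaving node0 (two breaches)
            // and node1 (one breach) as eligible targets.
            Map<String, Integer> eligible = Map.of("node0", 2, "node1", 1);
            System.out.println("new primary lands on " + pickTarget(eligible)); // node1
        }
    }

The routing_nodes dump that follows shows the resulting assignment.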
                            routing_nodes: -----node_id[node2][V] -----node_id[node3][V] @@ -384,13 +384,13 @@ public void testGlobalPrimaryBalance() throws Exception { * This test mimics a cluster state which can not be rebalanced due to * {@link org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider} * allocation decider which prevents shard relocation, leaving cluster unbalanced on primaries. - * + *
<p>
                            * There are two nodes (N1, N2) where all primaries land on N1 while replicas on N2. * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + *
<p>
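A minimal sketch, assuming a toy model of shard copies tracked per node, of the constraint SameShardAllocationDecider enforces in the scenario above (a copy may not move to a node that already holds a copy of the same shard); this is not the decider's actual API.

    import java.util.Map;
    import java.util.Set;

    public class SameShardDeciderSketch {
        static boolean canAllocate(String shardId, String targetNode, Map<String, Set<String>> copiesPerNode) {
            return !copiesPerNode.getOrDefault(targetNode, Set.of()).contains(shardId);
        }

        public static void main(String[] args) {
            // All primaries on N1, all replicas on N2: no copy can relocate to the other node.
            Map<String, Set<String>> copiesPerNode = Map.of(
                "N1", Set.of("[test][0]", "[test][1]"),
                "N2", Set.of("[test][0]", "[test][1]")
            );
            System.out.println(canAllocate("[test][0]", "N2", copiesPerNode)); // false: N2 already holds a copy
            System.out.println(canAllocate("[test][0]", "N3", copiesPerNode)); // true: empty node
        }
    }

The node dump that follows shows the stuck, primary-heavy assignment.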
                            * -----node_id[node_0][V] * --------[test][1], node[node_0], [P], s[STARTED], a[id=xqfZSToVSQaff2xvuxh_yA] * --------[test][0], node[node_0], [P], s[STARTED], a[id=VGjOeBGdSmu3pJR6T7v29A] @@ -454,14 +454,14 @@ public void testPrimaryBalance_NotSolved_1() { * This test mimics cluster state where re-balancing is not possible due to existing limitation of re-balancing * logic which applies at index level i.e. balance shards single index across all nodes. This will be solved when * primary shard count across indices, constraint is added. - * + *
<p>
                            * Please note, P1, P2 belongs to different index - * + *
<p>
                            * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + *
<p>
                            * -----node_id[node_0][V] * --------[test1][0], node[node_0], [P], s[STARTED], a[id=u7qtyy5AR42hgEa-JpeArg] * --------[test0][0], node[node_0], [P], s[STARTED], a[id=BQrLSo6sQyGlcLdVvGgqLQ] diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java index 10271cad33fec..8f90882c21804 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -66,7 +66,7 @@ * A base testcase that allows to run tests based on the output of the CAT API * The input is a line based cat/shards output like: * kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35 - * + *
<p>
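For illustration, a line in that cat/shards format can be split into the fields such a test feeds into its cluster-state builder; this is a hypothetical stand-alone sketch, not CatAllocationTestCase's own parsing code.

    public class CatShardsLineSketch {
        public static void main(String[] args) {
            String line = "kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35";
            String[] fields = line.trim().split("\\s+");
            String index = fields[0];
            int shard = Integer.parseInt(fields[1]);
            boolean primary = "p".equals(fields[2]);
            String state = fields[3];
            String node = fields[fields.length - 1];
            System.out.println(index + "[" + shard + "] primary=" + primary + " state=" + state + " on " + node);
        }
    }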
                            * the test builds up a clusterstate from the cat input and optionally runs a full balance on it. * This can be used to debug cluster allocation decisions. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java index cf32d2b3cf00f..7f2f048485318 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java @@ -30,7 +30,7 @@ public class IndexShardConstraintDeciderOverlapTests extends OpenSearchAllocatio /** * High watermark breach blocks new shard allocations to affected nodes. If shard count on such * nodes is low, this will cause IndexShardPerNodeConstraint to breach. - * + *
<p>
                            * This test verifies that this doesn't lead to unassigned shards, and there are no hot spots in eligible * nodes. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java index 2efbb256e36bc..617c9b4701722 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java @@ -58,7 +58,7 @@ public void testUnderReplicatedClusterScaleOut() { /** * Test cluster scale in scenario, when nodes are gracefully excluded from * cluster before termination. - * + *
<p>
                            * During moveShards(), shards are picked from across indexes in an interleaved manner. * This prevents hot spots by evenly picking up shards. Since shard order can change * in subsequent runs. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java index ef9ae90e18bb5..e1c0a7eff1f6e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java @@ -21,7 +21,7 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe /** * Test remote shard allocation and balancing for standard new cluster setup. - * + *
<p>
                            * Post rebalance primaries should be balanced across all the nodes. */ public void testShardAllocationAndRebalance() { @@ -72,7 +72,7 @@ private int getTotalShardCountAcrossNodes(final Map nodePrimari /** * Asserts that the expected value is within the variance range. - * + *
<p>
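A minimal sketch of the variance-based assertion described here, with a hypothetical helper name (the actual helper is the test's own method):

    public class VarianceAssertSketch {
        static void assertWithinVariance(double expected, double actual, double variance) {
            if (Math.abs(expected - actual) > variance) {
                throw new AssertionError("expected " + actual + " to be within " + variance + " of " + expected);
            }
        }

        public static void main(String[] args) {
            // e.g. 10 remote shards over 3 remote-capable nodes: the mean is non-integral,
            // so a tolerance of 1 shard per node is allowed.
            assertWithinVariance(10.0 / 3.0, 4.0, 1.0);
            System.out.println("within variance");
        }
    }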
                            * Being used to assert the average number of shards per node. * Variance is required in case of non-absolute mean values; * for example, total number of remote capable nodes in a cluster. diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 9cdbe04e0a0e4..85f6c129944fa 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -487,6 +487,9 @@ public void onFailure(String source, Exception e) { } }); assertBusy(mockAppender::assertAllExpectationsMatched); + // verify stats values after state is published + assertEquals(1, clusterManagerService.getClusterStateStats().getUpdateSuccess()); + assertEquals(0, clusterManagerService.getClusterStateStats().getUpdateFailed()); } } } diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index 0ebfe02dc7641..1a499bac3e2e8 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -239,7 +239,7 @@ public void testOffsetRounding() { * {@link DateTimeUnit} and {@link ZoneId} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. - * + *
<p>
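For illustration, the interval invariants this randomized test checks (roundedDown <= date < roundedUp, with the interval possibly spanning a DST change) can be sketched with plain java.time; the zone, date and unit below are fixed examples rather than the test's random inputs.

    import java.time.ZoneId;
    import java.time.ZonedDateTime;
    import java.time.temporal.ChronoUnit;

    public class RoundingInvariantSketch {
        public static void main(String[] args) {
            ZoneId zone = ZoneId.of("Europe/Berlin");
            // 02:30 local time on the day clocks fall back, i.e. inside the DST overlap.
            ZonedDateTime date = ZonedDateTime.of(2023, 10, 29, 2, 30, 0, 0, zone);
            ZonedDateTime roundedDown = date.truncatedTo(ChronoUnit.HOURS);
            ZonedDateTime roundedUp = roundedDown.plus(1, ChronoUnit.HOURS);

            if (roundedDown.isAfter(date) || !date.isBefore(roundedUp)) {
                throw new AssertionError("rounding interval does not contain the date");
            }
            System.out.println(roundedDown + " <= " + date + " < " + roundedUp);
        }
    }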
                            * It rounds the test date down and up and performs various checks on the * rounding unit interval that is defined by this. Assumptions tested are * described in diff --git a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java index 947a4f9b1c9ab..1780819390052 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java @@ -20,6 +20,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.function.UnaryOperator; import org.mockito.Mockito; @@ -51,10 +52,12 @@ public void testReadBlobAsync() throws Exception { // Objects needed for API call final byte[] data = new byte[size]; Randomness.get().nextBytes(data); + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); final ListenerTestUtils.CountingCompletionListener completionListener = new ListenerTestUtils.CountingCompletionListener<>(); - final ReadContext readContext = new ReadContext(size, List.of(inputStreamContainer), null); + final CompletableFuture streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); Mockito.doAnswer(invocation -> { ActionListener readContextActionListener = invocation.getArgument(1); @@ -76,7 +79,7 @@ public void testReadBlobAsync() throws Exception { assertEquals(1, response.getNumberOfParts()); assertEquals(size, response.getBlobSize()); - InputStreamContainer responseContainer = response.getPartStreams().get(0); + InputStreamContainer responseContainer = response.getPartStreams().get(0).get().join(); assertEquals(0, responseContainer.getOffset()); assertEquals(size, responseContainer.getContentLength()); assertEquals(100, responseContainer.getInputStream().available()); @@ -99,7 +102,8 @@ public void testReadBlobAsyncException() throws Exception { final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); final ListenerTestUtils.CountingCompletionListener completionListener = new ListenerTestUtils.CountingCompletionListener<>(); - final ReadContext readContext = new ReadContext(size, List.of(inputStreamContainer), null); + final CompletableFuture streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); Mockito.doAnswer(invocation -> { ActionListener readContextActionListener = invocation.getArgument(1); diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java deleted file mode 100644 index fa13d90f42fa6..0000000000000 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FileCompletionListenerTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a 
- * compatible open source license. - */ - -package org.opensearch.common.blobstore.stream.read.listener; - -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; - -import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; - -public class FileCompletionListenerTests extends OpenSearchTestCase { - - public void testFileCompletionListener() { - int numStreams = 10; - String fileName = "test_segment_file"; - CountingCompletionListener completionListener = new CountingCompletionListener(); - FileCompletionListener fileCompletionListener = new FileCompletionListener(numStreams, fileName, completionListener); - - for (int stream = 0; stream < numStreams; stream++) { - // Ensure completion listener called only when all streams are completed - assertEquals(0, completionListener.getResponseCount()); - fileCompletionListener.onResponse(null); - } - - assertEquals(1, completionListener.getResponseCount()); - assertEquals(fileName, completionListener.getResponse()); - } - - public void testFileCompletionListenerFailure() { - int numStreams = 10; - String fileName = "test_segment_file"; - CountingCompletionListener completionListener = new CountingCompletionListener(); - FileCompletionListener fileCompletionListener = new FileCompletionListener(numStreams, fileName, completionListener); - - // Fail the listener initially - IOException exception = new IOException(); - fileCompletionListener.onFailure(exception); - - for (int stream = 0; stream < numStreams - 1; stream++) { - assertEquals(0, completionListener.getResponseCount()); - fileCompletionListener.onResponse(null); - } - - assertEquals(1, completionListener.getFailureCount()); - assertEquals(exception, completionListener.getException()); - assertEquals(0, completionListener.getResponseCount()); - - fileCompletionListener.onFailure(exception); - assertEquals(2, completionListener.getFailureCount()); - assertEquals(exception, completionListener.getException()); - } -} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java index 811566eb5767b..f2a758b9bbe10 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java @@ -13,14 +13,11 @@ import org.junit.Before; import java.io.ByteArrayInputStream; -import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; +import java.util.function.UnaryOperator; public class FilePartWriterTests extends OpenSearchTestCase { @@ -34,130 +31,37 @@ public void init() throws Exception { public void testFilePartWriter() throws Exception { Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); int contentLength = 100; - int partNumber = 1; InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - 
FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); assertTrue(Files.exists(segmentFilePath)); assertEquals(contentLength, Files.size(segmentFilePath)); - assertEquals(1, fileCompletionListener.getResponseCount()); - assertEquals(Integer.valueOf(partNumber), fileCompletionListener.getResponse()); } public void testFilePartWriterWithOffset() throws Exception { Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); int contentLength = 100; int offset = 10; - int partNumber = 1; InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), offset); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); assertTrue(Files.exists(segmentFilePath)); assertEquals(contentLength + offset, Files.size(segmentFilePath)); - assertEquals(1, fileCompletionListener.getResponseCount()); - assertEquals(Integer.valueOf(partNumber), fileCompletionListener.getResponse()); } public void testFilePartWriterLargeInput() throws Exception { Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); int contentLength = 20 * 1024 * 1024; - int partNumber = 1; InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, contentLength, 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); assertTrue(Files.exists(segmentFilePath)); assertEquals(contentLength, Files.size(segmentFilePath)); - - assertEquals(1, fileCompletionListener.getResponseCount()); - assertEquals(Integer.valueOf(partNumber), fileCompletionListener.getResponse()); - } - - public void testFilePartWriterException() throws Exception { - Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); - int contentLength = 100; - int partNumber = 1; - InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); - InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, contentLength, 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - - IOException ioException = new IOException(); - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - assertFalse(anyStreamFailed.get()); - filePartWriter.processFailure(ioException); - - assertTrue(anyStreamFailed.get()); - assertFalse(Files.exists(segmentFilePath)); - - 
// Fail stream again to simulate another stream failure for same file - filePartWriter.processFailure(ioException); - - assertTrue(anyStreamFailed.get()); - assertFalse(Files.exists(segmentFilePath)); - - assertEquals(0, fileCompletionListener.getResponseCount()); - assertEquals(1, fileCompletionListener.getFailureCount()); - assertEquals(ioException, fileCompletionListener.getException()); - } - - public void testFilePartWriterStreamFailed() throws Exception { - Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); - int contentLength = 100; - int partNumber = 1; - InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); - InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), 0); - AtomicBoolean anyStreamFailed = new AtomicBoolean(true); - CountingCompletionListener fileCompletionListener = new CountingCompletionListener<>(); - - FilePartWriter filePartWriter = new FilePartWriter( - partNumber, - inputStreamContainer, - segmentFilePath, - anyStreamFailed, - fileCompletionListener - ); - filePartWriter.run(); - - assertFalse(Files.exists(segmentFilePath)); - assertEquals(0, fileCompletionListener.getResponseCount()); } } diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java index 21b7b47390a9b..0163c2275e7f4 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -29,7 +29,9 @@ import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.function.UnaryOperator; import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; @@ -46,6 +48,7 @@ public class ReadContextListenerTests extends OpenSearchTestCase { private static final int NUMBER_OF_PARTS = 5; private static final int PART_SIZE = 10; private static final String TEST_SEGMENT_FILE = "test_segment_file"; + private static final int MAX_CONCURRENT_STREAMS = 10; @BeforeClass public static void setup() { @@ -64,10 +67,17 @@ public void init() throws Exception { public void testReadContextListener() throws InterruptedException, IOException { Path fileLocation = path.resolve(UUID.randomUUID().toString()); - List blobPartStreams = initializeBlobPartStreams(); + List blobPartStreams = initializeBlobPartStreams(); CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); - ReadContextListener readContextListener = new ReadContextListener(TEST_SEGMENT_FILE, fileLocation, threadPool, completionListener); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); ReadContext readContext = new ReadContext((long) PART_SIZE * NUMBER_OF_PARTS, blobPartStreams, null); readContextListener.onResponse(readContext); @@ -79,10 +89,17 @@ public void testReadContextListener() throws InterruptedException, IOException { public void testReadContextListenerFailure() throws Exception { Path fileLocation = 
path.resolve(UUID.randomUUID().toString()); - List blobPartStreams = initializeBlobPartStreams(); + List blobPartStreams = initializeBlobPartStreams(); CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); - ReadContextListener readContextListener = new ReadContextListener(TEST_SEGMENT_FILE, fileLocation, threadPool, completionListener); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); InputStream badInputStream = new InputStream() { @Override @@ -101,29 +118,111 @@ public int available() { } }; - blobPartStreams.add(NUMBER_OF_PARTS, new InputStreamContainer(badInputStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS)); + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(badInputStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); readContextListener.onResponse(readContext); countDownLatch.await(); assertFalse(Files.exists(fileLocation)); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); } public void testReadContextListenerException() { Path fileLocation = path.resolve(UUID.randomUUID().toString()); CountingCompletionListener listener = new CountingCompletionListener(); - ReadContextListener readContextListener = new ReadContextListener(TEST_SEGMENT_FILE, fileLocation, threadPool, listener); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + listener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); IOException exception = new IOException(); readContextListener.onFailure(exception); assertEquals(1, listener.getFailureCount()); assertEquals(exception, listener.getException()); } - private List initializeBlobPartStreams() { - List blobPartStreams = new ArrayList<>(); + public void testWriteToTempFile() throws Exception { + final String fileName = UUID.randomUUID().toString(); + Path fileLocation = path.resolve(fileName); + List blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ByteArrayInputStream assertingStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)) { + @Override + public int read(byte[] b) throws IOException { + assertTrue("parts written to temp file location", Files.exists(readContextListener.getTmpFileLocation())); + return super.read(b); + } + }; + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(assertingStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS + 1, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertTrue(Files.exists(fileLocation)); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + public 
void testWriteToTempFile_alreadyExists_replacesFile() throws Exception { + final String fileName = UUID.randomUUID().toString(); + Path fileLocation = path.resolve(fileName); + // create an empty file at location. + Files.createFile(fileLocation); + assertEquals(0, Files.readAllBytes(fileLocation).length); + List blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertTrue(Files.exists(fileLocation)); + assertEquals(50, Files.readAllBytes(fileLocation).length); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + private List initializeBlobPartStreams() { + List blobPartStreams = new ArrayList<>(); for (int partNumber = 0; partNumber < NUMBER_OF_PARTS; partNumber++) { InputStream testStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)); - blobPartStreams.add(new InputStreamContainer(testStream, PART_SIZE, (long) partNumber * PART_SIZE)); + int finalPartNumber = partNumber; + blobPartStreams.add( + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(testStream, PART_SIZE, (long) finalPartNumber * PART_SIZE), + threadPool.generic() + ) + ); } return blobPartStreams; } diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java index 25140d1ef73d5..e16dd859ec14c 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java @@ -1169,9 +1169,9 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } public void testParseInvalidPolygon() throws IOException { - /** - * The following 3 test cases ensure proper error handling of invalid polygons - * per the GeoJSON specification + /* + The following 3 test cases ensure proper error handling of invalid polygons + per the GeoJSON specification */ // test case 1: create an invalid polygon with only 2 points String invalidPoly = XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java index 297b7d9f65d5f..7fc95c2316aef 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java @@ -225,7 +225,7 @@ public static MultiPoint remove180s(MultiPoint points) { /** * A randomized test that generates a random lines crossing anti-merdian and checks that the decomposed segments of this line * have the same total length (measured using Euclidean distances between neighboring points) as the original line. - * + *
<p>
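As a self-contained sketch of the length-preservation invariant described above, using plain Euclidean distance between neighboring points (the coordinates and split point are illustrative, not the test's random geometry):

    public class LineLengthSketch {
        static double length(double[][] pts) {
            double total = 0;
            for (int i = 1; i < pts.length; i++) {
                total += Math.hypot(pts[i][0] - pts[i - 1][0], pts[i][1] - pts[i - 1][1]);
            }
            return total;
        }

        public static void main(String[] args) {
            // A segment crossing lon=180 split at the crossing point keeps its total length.
            double[][] original = { { 179, 0 }, { 181, 2 } };
            double[][] westPart = { { 179, 0 }, { 180, 1 } };
            double[][] eastPart = { { 180, 1 }, { 181, 2 } };
            System.out.println(length(original) + " == " + (length(westPart) + length(eastPart))); // both ~2.828
        }
    }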
                            * It also extracts all points from these lines, performs normalization of these points and then compares that the resulting * points of line normalization match the points of points normalization with the exception of points that were created on the * antimeridian as the result of line decomposition. diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 02bc6c58ad233..e8ddfde11f4cc 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -61,8 +61,8 @@ protected boolean enableWarningsCheck() { } public void testTimezoneParsing() { - /** this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} - * assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); + /* this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} + assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); */ assertSameDateAs("2016-11-30T00+01", "strict_date_optional_time", "strict_date_optional_time"); assertSameDateAs("2016-11-30T00+0100", "strict_date_optional_time", "strict_date_optional_time"); diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index d28a4a51999e6..ab51cafb039c2 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -57,6 +57,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -118,12 +119,13 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("custom", custom); } }; - NetworkModule module = newNetworkModule(settings, plugin); + NetworkModule module = newNetworkModule(settings, null, plugin); assertSame(custom, module.getTransportSupplier()); } @@ -134,7 +136,7 @@ public void testRegisterHttpTransport() { .build(); Supplier custom = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map> getHttpTransports( Settings settings, @@ -154,7 +156,7 @@ public Map> getHttpTransports( assertSame(custom, module.getHttpServerTransportSupplier()); settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); - NetworkModule newModule = newNetworkModule(settings); + NetworkModule newModule = newNetworkModule(settings, null); expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); } @@ -168,7 +170,7 @@ public void testOverrideDefault() { Supplier customTransport = () -> null; // content doesn't matter we check reference equality Supplier custom = FakeHttpTransport::new; Supplier def = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = 
newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map> getTransports( Settings settings, @@ -176,7 +178,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("default_custom", customTransport); } @@ -212,7 +215,7 @@ public void testDefaultKeys() { Supplier custom = FakeHttpTransport::new; Supplier def = FakeHttpTransport::new; Supplier customTransport = () -> null; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map> getTransports( Settings settings, @@ -220,7 +223,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("default_custom", customTransport); } @@ -270,7 +274,7 @@ public TransportRequestHandler interceptHandler( return actualHandler; } }; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public List getTransportInterceptors( NamedWriteableRegistry namedWriteableRegistry, @@ -292,7 +296,7 @@ public List getTransportInterceptors( assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor); NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> { - newNetworkModule(settings, new NetworkPlugin() { + newNetworkModule(settings, null, new NetworkPlugin() { @Override public List getTransportInterceptors( NamedWriteableRegistry namedWriteableRegistry, @@ -306,7 +310,186 @@ public List getTransportInterceptors( assertEquals("interceptor must not be null", nullPointerException.getMessage()); } - private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... 
plugins) { + public void testRegisterCoreInterceptor() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + List coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(0, called.get()); + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + assertEquals(1, called.get()); + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + assertEquals(2, called.get()); + assertTrue(transportInterceptor instanceof NetworkModule.CompositeTransportInterceptor); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 1); + assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor); + } + + public void testInterceptorOrder() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + AtomicInteger called1 = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + TransportInterceptor interceptor1 = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called1.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + List coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor1); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor); + } + }); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 2); + + assertEquals(0, called.get()); + assertEquals(0, called1.get()); + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + assertEquals(1, called.get()); + assertEquals(1, called1.get()); + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + assertEquals(2, called.get()); + assertEquals(2, called1.get()); + } + + public void 
testInterceptorOrderException() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + AtomicInteger called1 = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + TransportInterceptor interceptor1 = new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + called1.incrementAndGet(); + throw new RuntimeException("Handler Invoke Failed"); + } + }; + + List coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor1); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor); + } + }); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 2); + + assertEquals(0, called.get()); + assertEquals(0, called1.get()); + try { + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + } catch (Exception e) { + assertEquals(0, called.get()); + assertEquals(1, called1.get()); + } + try { + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + } catch (Exception e) { + assertEquals(0, called.get()); + assertEquals(2, called1.get()); + } + } + + private NetworkModule newNetworkModule( + Settings settings, + List coreTransportInterceptors, + NetworkPlugin... plugins + ) { return new NetworkModule( settings, Arrays.asList(plugins), @@ -319,7 +502,8 @@ private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... plugi null, new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - NoopTracer.INSTANCE + NoopTracer.INSTANCE, + coreTransportInterceptors ); } } diff --git a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java index 8297f8fcf47e2..d1b3adcd55f0c 100644 --- a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java +++ b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java @@ -212,7 +212,7 @@ public void testTimeUnitRoundingDST() { * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. - * + *
<p>
                            * It rounds the test date down and up and performs various checks on the * rounding unit interval that is defined by this. Assumptions tested are * described in diff --git a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java index 688a532a61c4a..f4515361a89b8 100644 --- a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java @@ -100,7 +100,8 @@ private void createTransportSvc() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public BoundTransportAddress boundAddress() { diff --git a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java index dc0829adac101..421f6c6fe279b 100644 --- a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java @@ -185,7 +185,8 @@ public void testRemovingLocalAddresses() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -237,7 +238,8 @@ public void testUnknownHost() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -292,7 +294,8 @@ public void testResolveTimeout() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -368,7 +371,8 @@ public void testCancellationOnClose() throws InterruptedException { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -432,7 +436,8 @@ public void testInvalidHosts() throws IllegalAccessException { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public BoundTransportAddress boundAddress() { diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index f243a924f4e63..3c25dbdff3342 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -36,6 +36,7 @@ import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.env.Environment; import org.opensearch.env.EnvironmentSettingsResponse; import 
org.opensearch.extensions.ExtensionsSettings.Extension; @@ -77,6 +78,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -115,7 +117,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -409,19 +412,94 @@ public void testInitialize() throws Exception { ) ); - // Test needs to be changed to mock the connection between the local node and an extension. Assert statment is commented out for - // now. + // Test needs to be changed to mock the connection between the local node and an extension. // Link to issue: https://github.com/opensearch-project/OpenSearch/issues/4045 // mockLogAppender.assertAllExpectationsMatched(); } } + public void testInitializeExtension() throws Exception { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ) + ); + + doNothing().when(mockTransportService).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + doNothing().when(mockTransportService) + .sendRequest(any(DiscoveryExtensionNode.class), anyString(), any(InitializeExtensionRequest.class), any()); + + extensionsManager.initializeServicesAndRestHandler( + actionModule, + settingsModule, + mockTransportService, + clusterService, + settings, + client, + identityService + ); + + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(firstExtension); + + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(secondExtension); + + ThreadPool.terminate(threadPool, 3, TimeUnit.SECONDS); + + verify(mockTransportService, times(2)).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + verify(mockTransportService, times(2)).sendRequest( + any(DiscoveryExtensionNode.class), + anyString(), + any(InitializeExtensionRequest.class), + any() + ); + } + public void testHandleRegisterRestActionsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -431,6 +509,58 @@ public void 
testHandleRegisterRestActionsRequest() throws Exception { assertTrue(((AcknowledgedResponse) response).getStatus()); } + public void testHandleRegisterRestActionsRequestRequiresDiscoveryNode() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest("uniqueId1", List.of(), List.of()); + + expectThrows( + IllegalStateException.class, + () -> extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()) + ); + } + + public void testHandleRegisterRestActionsRequestMultiple() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); + List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); + for (int i = 0; i < 2; i++) { + String uniqueIdStr = "uniqueid-%d" + i; + + Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "Extension %s" + i, + uniqueIdStr, + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + List.of(), + extensionScopedSettings + ); + + extensionsManager.loadExtension(firstExtension); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest( + uniqueIdStr, + actionsList, + deprecatedActionsList + ); + TransportResponse response = extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()); + assertEquals(AcknowledgedResponse.class, response.getClass()); + assertTrue(((AcknowledgedResponse) response).getStatus()); + } + } + public void testHandleRegisterSettingsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); @@ -452,6 +582,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -467,6 +600,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("FOO /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -481,6 +617,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio ExtensionsManager 
extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -495,6 +634,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java index 1dede94c68208..c4d2f81f7cf79 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java @@ -68,7 +68,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index cdddf8e9be1be..0dae0ae1b4e0b 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -19,8 +19,9 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.ExtensionsSettings; +import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.identity.IdentityService; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.noop.NoopTracer; @@ -67,7 +68,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -160,8 +162,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th // optionally, you can stub out some methods: 
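Several of the register-REST-actions tests above now call loadExtension(...) before registering routes, and the added testHandleRegisterRestActionsRequestRequiresDiscoveryNode asserts that skipping that step fails fast with an IllegalStateException. A minimal sketch of that precondition-testing pattern with JUnit's assertThrows (expectThrows in the OpenSearch test base class is used the same way); RestActionRegistry is hypothetical:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.junit.jupiter.api.Test;

// Hypothetical registry used only to illustrate the precondition check.
class RestActionRegistry {
    private final Map<String, List<String>> loadedExtensions = new HashMap<>();

    void loadExtension(String uniqueId) {
        loadedExtensions.put(uniqueId, List.of());
    }

    void registerActions(String uniqueId, List<String> actions) {
        // Registering routes is only legal for an extension that was loaded first.
        if (!loadedExtensions.containsKey(uniqueId)) {
            throw new IllegalStateException("Missing extension node for " + uniqueId);
        }
        loadedExtensions.put(uniqueId, actions);
    }
}

class RestActionRegistryTest {
    @Test
    void registrationWithoutLoadFails() {
        RestActionRegistry registry = new RestActionRegistry();
        IllegalStateException e = assertThrows(
            IllegalStateException.class,
            () -> registry.registerActions("uniqueid1", List.of("GET /foo foo"))
        );
        assertEquals("Missing extension node for uniqueid1", e.getMessage());
    }

    @Test
    void registrationAfterLoadSucceeds() {
        RestActionRegistry registry = new RestActionRegistry();
        registry.loadExtension("uniqueid1");
        registry.registerActions("uniqueid1", List.of("GET /foo foo")); // no exception expected
    }
}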
when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -177,10 +179,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); @@ -210,8 +212,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -227,10 +229,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index ee36ea170e270..9da976de7d7f6 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ 
b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -79,7 +79,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -150,7 +151,7 @@ public void testRestSendToExtensionAction() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; expected.add(new Route(Method.GET, uriPrefix + "/foo")); @@ -183,7 +184,7 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build(); @@ -229,7 +230,7 @@ public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() thr identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET) diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index 9b3fd45245ef7..1c43bb565ef69 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.UnassignedInfo; @@ -52,12 +53,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.repositories.IndexId; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; @@ -275,7 +278,7 @@ public void testUpdateRoutingTable() { } } - public void testSkipRoutingTableUpdateWhenRemoteRecovery() { + public void testRoutingTableUpdateWhenRemoteStateRecovery() { final int numOfShards = randomIntBetween(1, 10); final IndexMetadata remoteMetadata = createIndexMetadata( @@ -286,7 +289,7 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { .build() ); - // Test remote index routing table is generated with ExistingStoreRecoverySource if no routing table is present + // Test remote index 
routing table is generated with ExistingStoreRecoverySource { final Index index = remoteMetadata.getIndex(); final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) @@ -322,48 +325,14 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { } - // Test remote index routing table is overridden if recovery source is not RemoteStoreRecoverySource + // Test remote index routing table is overridden if recovery source is RemoteStoreRecoverySource { - IndexRoutingTable.Builder remoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) - .initializeAsNew(remoteMetadata); final Index index = remoteMetadata.getIndex(); - final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().put(remoteMetadata, false).build()) - .routingTable(new RoutingTable.Builder().add(remoteBuilderWithoutRemoteRecovery.build()).build()) - .build(); - assertTrue(initialState.routingTable().hasIndex(index)); - final ClusterState newState = updateRoutingTable(initialState); - IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); - assertTrue(newState.routingTable().hasIndex(index)); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - - } - - // Test routing table update is skipped for a remote index - { + Map routingTableMap = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + routingTableMap.put(shardId, new IndexShardRoutingTable.Builder(new ShardId(remoteMetadata.getIndex(), 1)).build()); + } IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) .initializeAsRemoteStoreRestore( remoteMetadata, @@ -372,10 +341,9 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { remoteMetadata.getCreationVersion(), new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) ), - new HashMap<>(), + routingTableMap, true ); - final Index index = remoteMetadata.getIndex(); final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) .metadata(Metadata.builder().put(remoteMetadata, false).build()) .routingTable(new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()).build()) @@ -387,205 +355,28 @@ public void testSkipRoutingTableUpdateWhenRemoteRecovery() { assertEquals( 0, newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> 
shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) - ) - ); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - - } - - // Test reset routing table for 2 indices - one remote and one non remote. - // Routing table for non remote index should be updated and remote index routing table should remain intact - { - final IndexMetadata nonRemoteMetadata = createIndexMetadata( - "test-nonremote", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards).build() - ); - IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) - .initializeAsRemoteStoreRestore( - remoteMetadata, - new RecoverySource.RemoteStoreRecoverySource( - UUIDs.randomBase64UUID(), - remoteMetadata.getCreationVersion(), - new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) - ), - new HashMap<>(), - true - ); - IndexRoutingTable.Builder nonRemoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder(nonRemoteMetadata.getIndex()) - .initializeAsNew(nonRemoteMetadata); - final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().put(remoteMetadata, false).build()) - .metadata(Metadata.builder().put(nonRemoteMetadata, false).build()) - .routingTable( - new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()) - .add(nonRemoteBuilderWithoutRemoteRecovery.build()) - .build() - ) - .build(); - assertTrue(initialState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(initialState.routingTable().hasIndex(nonRemoteMetadata.getIndex())); - final ClusterState newState = updateRoutingTable(initialState); - assertTrue(newState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(newState.routingTable().hasIndex(nonRemoteMetadata.getIndex())); - IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); - IndexRoutingTable newNonRemoteIndexRoutingTable = newState.routingTable().index(nonRemoteMetadata.getIndex()); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) - ) - ); - assertEquals( - 0, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - 0, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) ) ); assertEquals( numOfShards, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> 
shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - 0, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - } - - // Test reset routing table for 2 indices, both remote backed but only once index has RemoteStoreRecoverySource. - // Routing table for only remote index without RemoteStoreRecoverySource should be updated - { - final IndexMetadata remoteWithoutRemoteRecoveryMetadata = createIndexMetadata( - "test-remote-without-recovery", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .build() - ); - IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) - .initializeAsRemoteStoreRestore( - remoteMetadata, - new RecoverySource.RemoteStoreRecoverySource( - UUIDs.randomBase64UUID(), - remoteMetadata.getCreationVersion(), - new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) - ), - new HashMap<>(), - true - ); - IndexRoutingTable.Builder remoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder( - remoteWithoutRemoteRecoveryMetadata.getIndex() - ).initializeAsNew(remoteWithoutRemoteRecoveryMetadata); - final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().put(remoteMetadata, false).build()) - .metadata(Metadata.builder().put(remoteWithoutRemoteRecoveryMetadata, false).build()) - .routingTable( - new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()) - .add(remoteBuilderWithoutRemoteRecovery.build()) - .build() - ) - .build(); - assertTrue(initialState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(initialState.routingTable().hasIndex(remoteWithoutRemoteRecoveryMetadata.getIndex())); - final ClusterState newState = updateRoutingTable(initialState); - assertTrue(newState.routingTable().hasIndex(remoteMetadata.getIndex())); - assertTrue(newState.routingTable().hasIndex(remoteWithoutRemoteRecoveryMetadata.getIndex())); - IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); - IndexRoutingTable newRemoteWithoutRemoteRecoveryIndexRoutingTable = newState.routingTable() - .index(remoteWithoutRemoteRecoveryMetadata.getIndex()); - assertEquals( - 0, newRemoteIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) ) ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) - ) - ); assertEquals( 0, newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource - ) - ); - assertEquals( - numOfShards, - newRemoteIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource - ) - ); - assertEquals( - 0, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( - 
shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) - ) - ); - assertEquals( - numOfShards, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( - shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) - ) - ); - assertEquals( - 0, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource ) ); assertEquals( numOfShards, - newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource ) ); + } } diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index c7ed1cb732154..74bae7b5eb7cf 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -68,6 +68,7 @@ import org.opensearch.gateway.PersistedClusterStateService.Writer; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; @@ -86,10 +87,12 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; @@ -104,6 +107,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -763,6 +767,43 @@ public void testRemotePersistedState() throws IOException { assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(true)); } + public void testRemotePersistedStateNotCommitted() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .previousClusterUUID(previousClusterUUID) + .clusterTerm(1L) + .stateVersion(5L) + .build(); + Mockito.when(remoteClusterStateService.getLatestClusterMetadataManifest(Mockito.any(), Mockito.any())) + .thenReturn(Optional.of(manifest)); + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest); + + Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState( + remoteClusterStateService, + 
ClusterState.UNKNOWN_UUID + ); + + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L)); + + final long clusterTerm = randomNonNegativeLong(); + ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).clusterUUIDCommitted(false).build()) + .build(); + + remotePersistedState.setLastAcceptedState(clusterState); + ArgumentCaptor previousClusterUUIDCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterStateCaptor.capture(), previousClusterUUIDCaptor.capture()); + assertEquals(previousClusterUUID, previousClusterUUIDCaptor.getValue()); + } + public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final String previousClusterUUID = "prev-cluster-uuid"; @@ -779,6 +820,26 @@ public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOExcept assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); } + public void testRemotePersistedStateFailureStats() throws IOException { + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); + when(remoteClusterStateService.getStats()).thenReturn(remoteStateStats); + doCallRealMethod().when(remoteClusterStateService).writeMetadataFailed(); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); + assertEquals(1, remoteClusterStateService.getStats().getFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + public void testGatewayForRemoteState() throws IOException { MockGatewayMetaState gateway = null; try { @@ -840,7 +901,8 @@ public void testGatewayForRemoteStateForInitialBootstrap() throws IOException { remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + false ); final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); @@ -886,7 +948,8 @@ public void testGatewayForRemoteStateForNodeReplacement() throws IOException { remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + false ); final 
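testRemotePersistedStateNotCommitted above checks not only that writeFullMetadata was invoked but also which previous cluster UUID it received, by capturing the arguments with Mockito's ArgumentCaptor. A minimal capture-then-assert sketch against a hypothetical MetadataWriter interface:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.mockito.ArgumentCaptor;

// Hypothetical collaborator standing in for the remote cluster state service.
interface MetadataWriter {
    void writeFullMetadata(String clusterStateUUID, String previousClusterUUID);
}

class ArgumentCaptorExample {
    public static void main(String[] args) {
        MetadataWriter writer = mock(MetadataWriter.class);

        // The code under test would normally decide these values; hard-coded here for brevity.
        writer.writeFullMetadata("state-uuid", "prev-cluster-uuid");

        // Capture what the mock actually received...
        ArgumentCaptor<String> stateCaptor = ArgumentCaptor.forClass(String.class);
        ArgumentCaptor<String> previousUuidCaptor = ArgumentCaptor.forClass(String.class);
        verify(writer).writeFullMetadata(stateCaptor.capture(), previousUuidCaptor.capture());

        // ...and assert on the captured values, as the test does for the previous cluster UUID.
        assertEquals("state-uuid", stateCaptor.getValue());
        assertEquals("prev-cluster-uuid", previousUuidCaptor.getValue());
    }
}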
CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); @@ -918,7 +981,13 @@ public void testGatewayForRemoteStateForNodeReboot() throws IOException { .clusterUUID(randomAlphaOfLength(10)) .build() ); - gateway = newGatewayForRemoteState(remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, clusterState); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + clusterState, + false + ); final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); verifyNoInteractions(remoteClusterStateService); @@ -933,13 +1002,77 @@ public void testGatewayForRemoteStateForNodeReboot() throws IOException { } } + public void testGatewayForRemoteStateForInitialBootstrapBlocksApplied() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn("test-cluster-uuid"); + + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings( + settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10)) + .put(IndexMetadata.INDEX_READ_ONLY_SETTING.getKey(), true) + ) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + + final ClusterState clusterState = ClusterState.builder( + createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(ClusterState.UNKNOWN_UUID) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build() + ) + ).nodes(DiscoveryNodes.EMPTY_NODES).build(); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, clusterState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + true + ); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + PersistedState lucenePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(clusterName.value()); // change this + verify(remoteStoreRestoreService).restore(any(ClusterState.class), any(String.class), anyBoolean(), any(String[].class)); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat( + Metadata.isGlobalStateEquals(lucenePersistedState.getLastAcceptedState().metadata(), clusterState.metadata()), + equalTo(true) + ); + assertThat( + lucenePersistedState.getLastAcceptedState().blocks().hasGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK), + equalTo(true) + ); + assertThat( + IndexMetadata.INDEX_READ_ONLY_SETTING.get( + lucenePersistedState.getLastAcceptedState().metadata().index("test-index1").getSettings() + ), + equalTo(true) + ); + } 
finally { + IOUtils.close(gateway); + } + } + private MockGatewayMetaState newGatewayForRemoteState( RemoteClusterStateService remoteClusterStateService, RemoteStoreRestoreService remoteStoreRestoreService, PersistedStateRegistry persistedStateRegistry, - ClusterState currentState + ClusterState currentState, + boolean prepareFullState ) throws IOException { - MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays); + MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays, prepareFullState); String randomRepoName = "randomRepoName"; String stateRepoTypeAttributeKey = String.format( Locale.getDefault(), @@ -963,6 +1096,7 @@ private MockGatewayMetaState newGatewayForRemoteState( when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); + when(transportService.getLocalNode()).thenReturn(mock(DiscoveryNode.class)); final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService( nodeEnvironment, xContentRegistry(), diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 66426c2a880a3..6c9a3201656d7 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -26,6 +26,33 @@ public class ClusterMetadataManifestTests extends OpenSearchTestCase { + public void testClusterMetadataManifestXContentV0() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V0, + null, + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContentV0(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + public void testClusterMetadataManifestXContent() throws IOException { UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( @@ -36,6 +63,8 @@ public void testClusterMetadataManifestXContent() throws IOException { Version.CURRENT, "test-node-id", false, + ClusterMetadataManifest.CODEC_V1, + "test-global-metadata-file", Collections.singletonList(uploadedIndexMetadata), "prev-cluster-uuid", true @@ -60,6 +89,8 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { Version.CURRENT, "B10RX1f5RJenMQvYccCgSQ", true, + 1, + "test-global-metadata-file", randomUploadedIndexMetadataList(), "yfObdx8KSMKKrXf8UyHhM", true diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 65166386733c6..586618bd1ecff 100644 --- 
a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -9,9 +9,11 @@ package org.opensearch.gateway.remote; import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; @@ -26,14 +28,18 @@ import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.FilterRepository; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; @@ -59,11 +65,24 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Stream; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; +import static java.util.stream.Collectors.toList; +import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.RETAINED_MANIFESTS; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -76,11 +95,14 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class RemoteClusterStateServiceTests extends OpenSearchTestCase { private 
RemoteClusterStateService remoteClusterStateService; + private ClusterSettings clusterSettings; private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; private BlobStoreRepository blobStoreRepository; @@ -111,16 +133,25 @@ public void setup() { .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + ClusterModule.getNamedXWriteables().stream() + ).flatMap(Function.identity()).collect(toList()) + ); + blobStoreRepository = mock(BlobStoreRepository.class); blobStore = mock(BlobStore.class); when(blobStoreRepository.blobStore()).thenReturn(blobStore); when(repositoriesService.repository("remote_store_repository")).thenReturn(blobStoreRepository); - when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(new NamedXContentRegistry(new ArrayList<>())); + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(xContentRegistry); remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + clusterSettings, () -> 0L, threadPool ); @@ -224,14 +255,15 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); - assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 1); - assertEquals(writeContextArgumentCaptor.getAllValues().size(), 1); + assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 2); + assertEquals(writeContextArgumentCaptor.getAllValues().size(), 2); WriteContext capturedWriteContext = writeContextArgumentCaptor.getValue(); byte[] writtenBytes = capturedWriteContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream().readAllBytes(); @@ -241,7 +273,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { new BytesArray(writtenBytes) ); - assertEquals(capturedWriteContext.getWritePriority(), WritePriority.HIGH); + assertEquals(capturedWriteContext.getWritePriority(), WritePriority.URGENT); assertEquals(writtenIndexMetadata.getNumberOfShards(), 1); assertEquals(writtenIndexMetadata.getNumberOfReplicas(), 0); assertEquals(writtenIndexMetadata.getIndex().getName(), "test-index"); @@ -255,13 +287,40 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { } - public void testWriteFullMetadataInParallelFailure() throws IOException { + public void testWriteFullMetadataFailureForGlobalMetadata() throws IOException { final ClusterState clusterState = 
generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); doAnswer((i) -> { + // For async write action listener will be called from different thread, replicating same behaviour here. + new Thread(new Runnable() { + @Override + public void run() { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + } + }).start(); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.GlobalMetadataTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + } + + public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); return null; }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); @@ -271,6 +330,7 @@ public void testWriteFullMetadataInParallelFailure() throws IOException { RemoteClusterStateService.IndexMetadataTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); } public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOException { @@ -278,6 +338,7 @@ public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOExc remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata(clusterState, clusterState, null); Assert.assertThat(manifest, nullValue()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); } public void testFailWriteIncrementalMetadataWhenTermChanged() { @@ -330,17 +391,214 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); } + /* + * Here we will verify the migration of manifest file from codec V0 and V1. + * + * Initially codec version is 0 and global metadata is also null, we will perform index metadata update. 
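testWriteFullMetadataFailureForGlobalMetadata above stubs asyncBlobUpload with doAnswer and completes the captured ActionListener from a separate thread, mimicking the asynchronous upload path. A minimal sketch of that stubbing technique with a hypothetical Uploader/UploadListener pair:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Hypothetical asynchronous API used only for this sketch.
interface UploadListener {
    void onResponse(String blobName);
    void onFailure(Exception e);
}

interface Uploader {
    void asyncUpload(byte[] payload, UploadListener listener);
}

class AsyncFailureStubExample {
    public static void main(String[] args) throws InterruptedException {
        Uploader uploader = mock(Uploader.class);

        // Complete the listener from another thread, the way a real async client would.
        doAnswer(invocation -> {
            UploadListener listener = invocation.getArgument(1);
            new Thread(() -> listener.onFailure(new RuntimeException("Cannot upload to remote"))).start();
            return null;
        }).when(uploader).asyncUpload(any(), any());

        CountDownLatch latch = new CountDownLatch(1);
        uploader.asyncUpload(new byte[0], new UploadListener() {
            @Override public void onResponse(String blobName) { latch.countDown(); }
            @Override public void onFailure(Exception e) {
                System.out.println("failed as stubbed: " + e.getMessage());
                latch.countDown();
            }
        });
        latch.await(5, TimeUnit.SECONDS); // wait for the simulated async callback
    }
}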
+ * In final manifest codec version should be 1 and + * global metadata should be updated, even if it was not changed in this cluster state update + */ + public void testMigrationFromCodecV0ManifestToCodecV1Manifest() throws IOException { + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .nodes(nodesWithLocalNodeClusterManager()) + .build(); + + // Update only index metadata + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(previousClusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(previousClusterState).metadata(newMetadata).build(); + + // previous manifest with codec 0 and null global metadata + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .globalMetadataFileName(null) + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifestAfterUpdate = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + newClusterState, + previousManifest + ); + + // global metadata is updated + assertThat(manifestAfterUpdate.getGlobalMetadataFileName(), notNullValue()); + // Manifest file with codec version with 1 is updated. + assertThat(manifestAfterUpdate.getCodecVersion(), is(ClusterMetadataManifest.CODEC_V1)); + } + + public void testWriteIncrementalGlobalMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(Collections.emptyList()) + .globalMetadataFileName("mock-filename") + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + /* + * Here we will verify index metadata is not uploaded 
again if change is only in global metadata + */ + public void testGlobalMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata).version(randomNonNegativeLong())) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with index. + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + // Updating remote cluster state with changing index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only global metadata is different + Metadata newMetadata = Metadata.builder(clusterState.metadata()) + .persistentSettings(Settings.builder().put("cluster.blocks.read_only", true).build()) + .version(randomNonNegativeLong()) + .build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterIndexMetadataUpdate + ); + + // Verify that index metadata information is same in manifest files + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(manifestAfterGlobalMetadataUpdate.getIndices().size())); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexName(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexName()) + ); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexUUID(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexUUID()) + ); + + // since timestamp is part of file name, if file name is same we can confirm that file is not update in global metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getUploadedFilename(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getUploadedFilename()) + ); + + // global metadata file would have changed + assertFalse( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName() + .equalsIgnoreCase(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + } + + /* + * Here we will verify global metadata is not uploaded again if change is only in index metadata + */ + public void testIndexMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with global metadata. 
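testGlobalMetadataOnlyUpdated above relies on uploaded metadata file names embedding a timestamp, so an unchanged file name between two manifests is enough to show the blob was not written again. A small hypothetical sketch of such a naming scheme (the real OpenSearch file name format differs):

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical naming helper; the real OpenSearch file name format differs.
class MetadataBlobNames {
    private final AtomicLong clock = new AtomicLong(1_000L);

    // Every upload gets a fresh timestamp, so two distinct uploads can never share a name.
    String newIndexMetadataBlobName(String indexUUID) {
        return "index--" + indexUUID + "--" + clock.incrementAndGet();
    }
}

class BlobNameExample {
    public static void main(String[] args) {
        MetadataBlobNames names = new MetadataBlobNames();
        String first = names.newIndexMetadataBlobName("uuid");
        String second = names.newIndexMetadataBlobName("uuid");
        // Equal names would mean the same upload; here a second write produces a new name.
        System.out.println(first.equals(second)); // prints false
    }
}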
+ final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + + // Updating remote cluster state with changing global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only Index metadata is different + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterGlobalMetadataUpdate + ); + + // Verify that global metadata information is same in manifest files after updating index Metadata + // since timestamp is part of file name, if file name is same we can confirm that file is not update in index metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName(), + is(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + + // Index metadata would have changed + assertThat(manifestAfterGlobalMetadataUpdate.getIndices().size(), is(0)); + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(1)); + } + public void testReadLatestMetadataManifestFailedIOException() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); BlobContainer blobContainer = mockBlobStoreObjects(); - when( - blobContainer.listBlobsByPrefixInSortedOrder( - "manifest" + RemoteClusterStateService.DELIMITER, - 1, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC - ) - ).thenThrow(IOException.class); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenThrow(IOException.class); remoteClusterStateService.start(); Exception e = assertThrows( @@ -357,13 +615,8 @@ public void testReadLatestMetadataManifestFailedNoManifestFileInRemote() throws final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); BlobContainer blobContainer = mockBlobStoreObjects(); - when( - blobContainer.listBlobsByPrefixInSortedOrder( - "manifest" + RemoteClusterStateService.DELIMITER, - 1, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC - ) - ).thenReturn(List.of()); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(List.of()); remoteClusterStateService.start(); Optional manifest = remoteClusterStateService.getLatestClusterMetadataManifest( @@ -378,13 +631,8 @@ public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRe BlobContainer blobContainer = mockBlobStoreObjects(); BlobMetadata blobMetadata = new PlainBlobMetadata("manifestFileName", 1); - when( - blobContainer.listBlobsByPrefixInSortedOrder( - "manifest" + RemoteClusterStateService.DELIMITER, - 1, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC 
- ) - ).thenReturn(Arrays.asList(blobMetadata)); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); when(blobContainer.readBlob("manifestFileName")).thenThrow(FileNotFoundException.class); remoteClusterStateService.start(); @@ -409,6 +657,7 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) .build(); BlobContainer blobContainer = mockBlobStoreObjects(); @@ -416,7 +665,8 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE remoteClusterStateService.start(); assertEquals( - remoteClusterStateService.getLatestIndexMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getIndices() .size(), 0 ); @@ -444,10 +694,8 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, - () -> remoteClusterStateService.getLatestIndexMetadata( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) + () -> remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getIndices() ); assertEquals(e.getMessage(), "Error while downloading IndexMetadata - " + uploadedIndexMetadata.getUploadedFilename()); } @@ -465,6 +713,7 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .codecVersion(ClusterMetadataManifest.CODEC_V0) .previousClusterUUID("prev-cluster-uuid") .build(); @@ -485,6 +734,70 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); } + public void testReadGlobalMetadata() throws IOException { + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(new NamedXContentRegistry( + List.of(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(IndexGraveyard.TYPE), IndexGraveyard::fromXContent)))); + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName("global-metadata-file") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expactedMetadata); + + Metadata metadata = remoteClusterStateService.getLatestMetadata( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + + assertTrue(Metadata.isGlobalStateEquals(metadata, 
expactedMetadata)); + } + + public void testReadGlobalMetadataIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + String globalIndexMetadataName = "global-metadata-file"; + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName(globalIndexMetadataName) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainerForGlobalMetadata(blobContainer, expectedManifest, expactedMetadata); + + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(globalIndexMetadataName))).thenThrow( + FileNotFoundException.class + ); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + ); + assertEquals(e.getMessage(), "Error while downloading Global Metadata - " + globalIndexMetadataName); + } + public void testReadLatestIndexMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); @@ -511,19 +824,21 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") + .globalMetadataFileName("global-metadata-file") + .codecVersion(ClusterMetadataManifest.CODEC_V0) .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); - Map indexMetadataMap = remoteClusterStateService.getLatestIndexMetadata( + Map indexMetadataMap = remoteClusterStateService.getLatestMetadata( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() - ); + ).getIndices(); assertEquals(indexMetadataMap.size(), 1); - assertEquals(indexMetadataMap.get(index.getUUID()).getIndex().getName(), index.getName()); - assertEquals(indexMetadataMap.get(index.getUUID()).getNumberOfShards(), indexMetadata.getNumberOfShards()); - assertEquals(indexMetadataMap.get(index.getUUID()).getNumberOfReplicas(), indexMetadata.getNumberOfReplicas()); + assertEquals(indexMetadataMap.get(index.getName()).getIndex().getName(), index.getName()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfShards(), indexMetadata.getNumberOfShards()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfReplicas(), indexMetadata.getNumberOfReplicas()); } public void testMarkLastStateAsCommittedSuccess() throws IOException { @@ -596,7 +911,7 @@ public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOExcepti "cluster-uuid3", "cluster-uuid1" ); - mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, randomBoolean(), Collections.emptyMap()); remoteClusterStateService.start(); String 
previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); @@ -618,7 +933,233 @@ public void testGetValidPreviousClusterUUIDWithInvalidMultipleChains() throws IO assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); } + public void testGetValidPreviousClusterUUIDWhenLastUUIDUncommitted() throws IOException { + Map clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + Map clusterUUIDCommitted = Map.of("cluster-uuid1", true, "cluster-uuid2", true, "cluster-uuid3", false); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, clusterUUIDCommitted); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid2")); + } + + public void testDeleteStaleClusterUUIDs() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + ClusterMetadataManifest clusterMetadataManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID("cluster-uuid1") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .build(); + + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + BlobContainer uuidContainerContainer = mock(BlobContainer.class); + BlobContainer manifest2Container = mock(BlobContainer.class); + BlobContainer manifest3Container = mock(BlobContainer.class); + when(blobStore.blobContainer(any())).then(invocation -> { + BlobPath blobPath1 = invocation.getArgument(0); + if (blobPath1.buildAsString().endsWith("cluster-state/")) { + return uuidContainerContainer; + } else if (blobPath1.buildAsString().contains("cluster-state/cluster-uuid2/")) { + return manifest2Container; + } else if (blobPath1.buildAsString().contains("cluster-state/cluster-uuid3/")) { + return manifest3Container; + } else { + throw new IllegalArgumentException("Unexpected blob path " + blobPath1); + } + }); + Map blobMetadataMap = Map.of( + "cluster-uuid1", + mock(BlobContainer.class), + "cluster-uuid2", + mock(BlobContainer.class), + "cluster-uuid3", + mock(BlobContainer.class) + ); + when(uuidContainerContainer.children()).thenReturn(blobMetadataMap); + when( + manifest2Container.listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + Integer.MAX_VALUE, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(List.of(new PlainBlobMetadata("mainfest2", 1L))); + when( + manifest3Container.listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + Integer.MAX_VALUE, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(List.of(new PlainBlobMetadata("mainfest3", 1L))); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterUUIDs(clusterState, clusterMetadataManifest); + try { + assertBusy(() -> { + verify(manifest2Container, times(1)).delete(); + verify(manifest3Container, times(1)).delete(); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testRemoteStateStats() throws IOException { + final ClusterState clusterState = 
generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(1, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(0, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getFailedCount()); + } + + public void testRemoteStateCleanupFailureStats() throws IOException { + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleUUIDsClusterMetadata("cluster1", Arrays.asList("cluster-uuid1")); + try { + assertBusy(() -> { + // wait for stats to get updated + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(1, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testFileNames() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + String indexMetadataFileName = RemoteClusterStateService.indexMetadataFileName(indexMetadata); + String[] splittedIndexMetadataFileName = indexMetadataFileName.split(DELIMITER); + assertThat(indexMetadataFileName.split(DELIMITER).length, is(4)); + assertThat(splittedIndexMetadataFileName[0], is(METADATA_FILE_PREFIX)); + assertThat(splittedIndexMetadataFileName[1], is(RemoteStoreUtils.invertLong(indexMetadata.getVersion()))); + assertThat(splittedIndexMetadataFileName[3], is(String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION))); + + int term = randomIntBetween(5, 10); + int version = randomIntBetween(5, 10); + String manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, true); + assertThat(manifestFileName.split(DELIMITER).length, is(6)); + String[] splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[0], is(MANIFEST_FILE_PREFIX)); + assertThat(splittedName[1], is(RemoteStoreUtils.invertLong(term))); + assertThat(splittedName[2], is(RemoteStoreUtils.invertLong(version))); + assertThat(splittedName[3], is("C")); + assertThat(splittedName[5], is(String.valueOf(MANIFEST_CURRENT_CODEC_VERSION))); + + manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, false); + splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[3], is("P")); + } + + public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Exception { + BlobContainer blobContainer = mock(BlobContainer.class); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + 
when(blobStore.blobContainer(any())).thenReturn(blobContainer); + + CountDownLatch latch = new CountDownLatch(1); + AtomicInteger callCount = new AtomicInteger(0); + doAnswer(invocation -> { + callCount.incrementAndGet(); + if (latch.await(5000, TimeUnit.SECONDS) == false) { + throw new Exception("Timed out waiting for delete task queuing to complete"); + } + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + any(String.class), + any(int.class), + any(BlobContainer.BlobNameSortOrder.class), + any(ActionListener.class) + ); + + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + + latch.countDown(); + assertBusy(() -> assertEquals(1, callCount.get())); + } + + public void testIndexMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getIndexMetadataUploadTimeout() + ); + + // verify update index metadata upload timeout + int indexMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.index_metadata.upload_timeout", indexMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(indexMetadataUploadTimeout, remoteClusterStateService.getIndexMetadataUploadTimeout().seconds()); + } + + public void testGlobalMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getGlobalMetadataUploadTimeout() + ); + + // verify update global metadata upload timeout + int globalMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", globalMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(globalMetadataUploadTimeout, remoteClusterStateService.getGlobalMetadataUploadTimeout().seconds()); + } + private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, Collections.emptyMap()); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map clusterUUIDsPointers, + Map clusterUUIDCommitted + ) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, clusterUUIDCommitted); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map clusterUUIDsPointers, + boolean differGlobalMetadata, + Map clusterUUIDCommitted + ) throws IOException { final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); when(blobPath.add(anyString())).thenReturn(blobPath); @@ -640,7 +1181,9 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste "cluster-uuid1", clusterUUIDsPointers.get("cluster-uuid1"), randomAlphaOfLength(10), - uploadedIndexMetadataList1 + uploadedIndexMetadataList1, + "test-metadata1", + clusterUUIDCommitted.getOrDefault("cluster-uuid1", true) ); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") @@ -653,8 +1196,12 @@ private void 
mockObjectsForGettingPreviousClusterUUID(Map cluste .numberOfShards(1) .numberOfReplicas(1) .build(); + Metadata metadata1 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); Map indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); - mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1); + mockBlobContainerForGlobalMetadata(blobContainer1, clusterManifest1, metadata1); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V1); List uploadedIndexMetadataList2 = List.of( new UploadedIndexMetadata("index1", "index-uuid1", "key1"), @@ -664,7 +1211,9 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste "cluster-uuid2", clusterUUIDsPointers.get("cluster-uuid2"), randomAlphaOfLength(10), - uploadedIndexMetadataList2 + uploadedIndexMetadataList2, + "test-metadata2", + clusterUUIDCommitted.getOrDefault("cluster-uuid2", true) ); IndexMetadata indexMetadata3 = IndexMetadata.builder("index1") .settings(indexSettings) @@ -676,37 +1225,60 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste .numberOfShards(1) .numberOfReplicas(1) .build(); + Metadata metadata2 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); Map indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); - mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2); + mockBlobContainerForGlobalMetadata(blobContainer2, clusterManifest2, metadata2); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V1); + + // differGlobalMetadata controls which one of the IndexMetadata or Metadata objects differs + // when comparing the cluster-uuid3 and cluster-uuid1 states. + // If set to true, only Metadata will differ between cluster uuid1 and cluster uuid3. + // If set to false, only IndexMetadata will differ. + // Adding a difference in exactly one of these at random verifies that the uuid trimming logic compares both + // IndexMetadata and Metadata when deciding whether the remote state of two different cluster uuids is the same. + List uploadedIndexMetadataList3 = differGlobalMetadata + ? new ArrayList<>(uploadedIndexMetadataList1) + : List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); + IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map indexMetadataMap3 = differGlobalMetadata + ?
new HashMap<>(indexMetadataMap1) + : Map.of("index-uuid1", indexMetadata5); + Metadata metadata3 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), !differGlobalMetadata).build()) + .build(); - List uploadedIndexMetadataList3 = List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest( "cluster-uuid3", clusterUUIDsPointers.get("cluster-uuid3"), randomAlphaOfLength(10), - uploadedIndexMetadataList3 + uploadedIndexMetadataList3, + "test-metadata3", + clusterUUIDCommitted.getOrDefault("cluster-uuid3", true) ); - IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") - .settings(indexSettings) - .numberOfShards(1) - .numberOfReplicas(1) - .build(); - Map indexMetadataMap3 = Map.of("index-uuid1", indexMetadata5); - mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3); - - when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn( - uuidBlobContainer, - blobContainer1, - blobContainer1, - blobContainer3, - blobContainer3, - blobContainer2, - blobContainer2, - blobContainer1, - blobContainer2, - blobContainer1, - blobContainer2 + mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V1); + + ArrayList mockBlobContainerOrderedList = new ArrayList<>( + List.of(blobContainer1, blobContainer1, blobContainer3, blobContainer3, blobContainer2, blobContainer2) + ); + + if (differGlobalMetadata) { + mockBlobContainerOrderedList.addAll( + List.of(blobContainer3, blobContainer1, blobContainer3, blobContainer1, blobContainer1, blobContainer3) + ); + } + mockBlobContainerOrderedList.addAll( + List.of(blobContainer2, blobContainer1, blobContainer2, blobContainer1, blobContainer1, blobContainer2) ); + BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()]; + mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray); + when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(uuidBlobContainer, mockBlobContainerOrderedArray); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); } @@ -714,7 +1286,9 @@ private ClusterMetadataManifest generateClusterMetadataManifest( String clusterUUID, String previousClusterUUID, String stateUUID, - List uploadedIndexMetadata + List uploadedIndexMetadata, + String globalMetadataFileName, + Boolean isUUIDCommitted ) { return ClusterMetadataManifest.builder() .indices(uploadedIndexMetadata) @@ -726,7 +1300,9 @@ private ClusterMetadataManifest generateClusterMetadataManifest( .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID(previousClusterUUID) .committed(true) - .clusterUUIDCommitted(true) + .clusterUUIDCommitted(isUUIDCommitted) + .globalMetadataFileName(globalMetadataFileName) + .codecVersion(ClusterMetadataManifest.CODEC_V1) .build(); } @@ -759,21 +1335,29 @@ private void mockBlobContainer( ClusterMetadataManifest clusterMetadataManifest, Map indexMetadataMap ) throws IOException { - BlobMetadata blobMetadata = new PlainBlobMetadata("manifestFileName", 1); - when( - blobContainer.listBlobsByPrefixInSortedOrder( - "manifest" + RemoteClusterStateService.DELIMITER, - 1, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC - ) - ).thenReturn(Arrays.asList(blobMetadata)); + mockBlobContainer(blobContainer, clusterMetadataManifest, indexMetadataMap, 
ClusterMetadataManifest.CODEC_V0); + } + + private void mockBlobContainer( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Map indexMetadataMap, + int codecVersion + ) throws IOException { + String manifestFileName = codecVersion >= ClusterMetadataManifest.CODEC_V1 + ? "manifest__manifestFileName__abcd__abcd__abcd__1" + : "manifestFileName"; + BlobMetadata blobMetadata = new PlainBlobMetadata(manifestFileName, 1); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( clusterMetadataManifest, - "manifestFileName", - blobStoreRepository.getCompressor() + manifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS ); - when(blobContainer.readBlob("manifestFileName")).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + when(blobContainer.readBlob(manifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { try { @@ -782,20 +1366,74 @@ private void mockBlobContainer( return; } String fileName = uploadedIndexMetadata.getUploadedFilename(); - BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( - indexMetadata, - fileName, - blobStoreRepository.getCompressor() - ); - when(blobContainer.readBlob(fileName + ".dat")).thenReturn( - new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()) - ); + when(blobContainer.readBlob(fileName + ".dat")).thenAnswer((invocationOnMock) -> { + BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( + indexMetadata, + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()); + }); } catch (IOException e) { throw new RuntimeException(e); } }); } + private void mockBlobContainerForGlobalMetadata( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Metadata metadata + ) throws IOException { + String mockManifestFileName = "manifest__1__2__C__456__1"; + BlobMetadata blobMetadata = new PlainBlobMetadata(mockManifestFileName, 1); + when( + blobContainer.listBlobsByPrefixInSortedOrder( + "manifest" + RemoteClusterStateService.DELIMITER, + 1, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(Arrays.asList(blobMetadata)); + + BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + mockManifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + + String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))).thenAnswer( + (invocationOnMock) -> { + BytesReference bytesGlobalMetadata = RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( + metadata, + "global-metadata-file", + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()); + } + ); + } + + private static ClusterState.Builder 
generateClusterStateWithGlobalMetadata() { + final Settings clusterSettings = Settings.builder().put("cluster.blocks.read_only", true).build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .persistentSettings(clusterSettings) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() + ); + } + private static ClusterState.Builder generateClusterStateWithOneIndex() { final Index index = new Index("test-index", "index-uuid"); final Settings idxSettings = Settings.builder() @@ -812,7 +1450,12 @@ private static ClusterState.Builder generateClusterStateWithOneIndex() { .version(1L) .stateUUID("state-uuid") .metadata( - Metadata.builder().put(indexMetadata, true).clusterUUID("cluster-uuid").coordinationMetadata(coordinationMetadata).build() + Metadata.builder() + .version(randomNonNegativeLong()) + .put(indexMetadata, true) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() ); } diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index a1d6be84c9926..97bc822be7d51 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -105,6 +105,7 @@ import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; @@ -260,7 +261,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + DefaultRecoverySettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index 387997892ee30..32c4c048d77ba 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -31,6 +31,7 @@ package org.opensearch.index; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -49,17 +50,17 @@ public class MergePolicySettingsTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); public void testCompoundFileSettings() throws IOException { - assertThat(new MergePolicyConfig(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); - assertThat(new 
MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } private static IndexSettings indexSettings(Settings settings) { @@ -67,33 +68,197 @@ private static IndexSettings indexSettings(Settings settings) { } public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig( + TieredMergePolicyProvider tmp = new TieredMergePolicyProvider( logger, - indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()) + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) ); - assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); + LogByteSizeMergePolicyProvider lbsmp = new LogByteSizeMergePolicyProvider( + logger, + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) + ); + assertTrue(tmp.getMergePolicy() instanceof NoMergePolicy); + assertTrue(lbsmp.getMergePolicy() instanceof NoMergePolicy); } public void testUpdateSettings() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertThat(indexSettings.getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), 
IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + IndexSettings indexSettings = indexSettings(settings); + assertThat(indexSettings.getMergePolicy(false).getNoCFSRatio(), equalTo(0.1)); indexSettings = indexSettings(build(0.9)); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.9)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.9)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.1))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.1)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.1)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.0))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("true"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(1.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(1.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("false"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); + } + + public void testDefaultMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + } + + public void testMergePolicyPrecedence() throws IOException { + // 1. INDEX_MERGE_POLICY is not set + // assert defaults + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 1.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert index policy is tiered whereas time series index policy is log_byte_size + Settings nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 1.2 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time series index policy is tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2. 
INDEX_MERGE_POLICY set as tiered + // assert both index and time-series-index merge policy is set as tiered + indexSettings = indexSettings( + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert both index and time-series-index merge policy is set as tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 3. INDEX_MERGE_POLICY set as log_byte_size + // assert both index and time-series-index merge policy is set as log_byte_size + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 3.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time-series-index merge policy is set as log_byte_size + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + } + + public void testInvalidMergePolicy() throws IOException { + + final Settings invalidSettings = Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc1 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.INDEX_MERGE_POLICY.get(invalidSettings) + ); + assertThat(exc1.getMessage(), containsString(" has unsupported policy specified: ")); + IllegalArgumentException exc2 = expectThrows( + IllegalArgumentException.class, + () -> indexSettings(invalidSettings).getMergePolicy(false) + ); + assertThat(exc2.getMessage(), containsString(" has unsupported policy specified: ")); + + final Settings invalidSettings2 = Settings.builder().put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc3 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.get(invalidSettings2) + ); + assertThat(exc3.getMessage(), containsString(" has unsupported policy specified: ")); + + IllegalArgumentException exc4 = expectThrows( + IllegalArgumentException.class, + () -> new IndexSettings(newIndexMeta("test", Settings.EMPTY), 
invalidSettings2).getMergePolicy(true) + ); + assertThat(exc4.getMessage(), containsString(" has unsupported policy specified: ")); + } + + public void testUpdateSettingsForLogByteSizeMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + assertThat(indexSettings.getMergePolicy(true).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.9) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.9)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.0) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "true") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(1.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "false") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); } public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); @@ -102,21 +267,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) 
indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0 ); indexSettings.updateIndexMetadata( @@ -124,41 +289,41 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); indexSettings.updateIndexMetadata( newIndexMeta( "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001 ); indexSettings.updateIndexMetadata( @@ -166,21 +331,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + 
TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); indexSettings.updateIndexMetadata( @@ -188,37 +353,37 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() ) ); - assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() ) ) ); @@ -226,50 +391,162 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetadata(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) 
indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); } + public void testLogByteSizeMergePolicySettingsUpdate() throws IOException { + + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING.getKey(), + new ByteSizeValue( + LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, + ByteSizeUnit.MB + ) + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMBForForcedMerge(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, ByteSizeUnit.MB) + 
.getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING.getKey(), 10000000) + .build() + ) + ); + assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeDocs(), 10000000); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ) + ); + assertEquals(indexSettings.getMergePolicy(true).getNoCFSRatio(), 0.1, 0.0); + } + public Settings build(String value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(double value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(int value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(boolean value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } } diff --git a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java index 2443ee1ab40be..baaf584702f78 100644 --- a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java @@ -92,8 +92,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2") .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true"); @@ -123,8 +123,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") 
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000"); IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index 34fa13f0ba62c..478fdcb24f76a 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -278,6 +278,13 @@ private SegmentReplicationPressureService buildPressureService(Settings settings ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - return new SegmentReplicationPressureService(settings, clusterService, indicesService, shardStateAction, mock(ThreadPool.class)); + return new SegmentReplicationPressureService( + settings, + clusterService, + indicesService, + shardStateAction, + new SegmentReplicationStatsTracker(indicesService), + mock(ThreadPool.class) + ); } } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java new file mode 100644 index 0000000000000..04423d583e8f9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchTestCase; + +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; + +public class SegmentReplicationStatsTrackerTests extends OpenSearchTestCase { + + private IndicesService indicesService = mock(IndicesService.class); + + public void testRejectedCount() { + SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); + + // Verify that total rejection count is 0 on an empty rejectionCount map in statsTracker. + assertTrue(segmentReplicationStatsTracker.getRejectionCount().isEmpty()); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 0L); + + // Verify that total rejection count is 1 after incrementing rejectionCount. 
+ segmentReplicationStatsTracker.incrementRejectionCount(Mockito.mock(ShardId.class)); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 1L); + } + +} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index e3d77d45861ac..305c3a3acbf75 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -233,7 +233,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -3334,6 +3333,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will be incremented whenever cleanup is performed correctly. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(1L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3445,6 +3447,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever cleanup is disabled. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3549,6 +3554,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever there is some issue with cleanup. 
+ long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3997,7 +4005,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final Path badTranslogLog = createTempDir(); final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new LocalTranslog( - new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, @@ -4014,7 +4022,8 @@ public void testRecoverFromForeignTranslog() throws IOException { shardId, translog.location(), config.getIndexSettings(), - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); EngineConfig brokenConfig = new EngineConfig.Builder().shardId(shardId) @@ -7247,7 +7256,11 @@ public void testMaxSeqNoInCommitUserData() throws Exception { engine.ensureOpen(); while (running.get() && assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().currentFileGeneration() < 500) { - engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + try { + engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + } catch (IOException e) { + fail("io exception not expected"); + } } }); rollTranslog.start(); @@ -7703,7 +7716,8 @@ public void testNotWarmUpSearcherInEngineCtor() throws Exception { config.getTranslogConfig().getShardId(), createTempDir(), config.getTranslogConfig().getIndexSettings(), - config.getTranslogConfig().getBigArrays() + config.getTranslogConfig().getBigArrays(), + "" ); EngineConfig configWithWarmer = new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 7b587e9a83d2d..57509c5daa2b1 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -35,6 +35,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; @@ -577,6 +578,59 @@ public void testDecrefToZeroRemovesFile() throws IOException { } } + public void testCommitOnCloseThrowsException_decRefStore() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS); + List operations = generateHistoryOnReplica( + randomIntBetween(1, 10), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + indexOperations(nrtEngine, operations); + // wipe the nrt directory initially so we can sync with primary. 
+ cleanAndCopySegmentsFromPrimary(nrtEngine); + final Optional toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); + assertEquals(2, nrtEngineStore.refCount()); + nrtEngine.close(); + assertEquals(1, nrtEngineStore.refCount()); + assertTrue(nrtEngineStore.isMarkedCorrupted()); + // store will throw when eventually closed, not handled here. + assertThrows(RuntimeException.class, nrtEngineStore::close); + } + + public void testFlushThrowsFlushFailedExceptionOnCorruption() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS); + List operations = generateHistoryOnReplica( + randomIntBetween(1, 10), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + indexOperations(nrtEngine, operations); + // wipe the nrt directory initially so we can sync with primary. + cleanAndCopySegmentsFromPrimary(nrtEngine); + final Optional toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); + assertThrows(FlushFailedEngineException.class, nrtEngine::flush); + nrtEngine.close(); + if (nrtEngineStore.isMarkedCorrupted()) { + assertThrows(RuntimeException.class, nrtEngineStore::close); + } else { + // With certain mock directories a NoSuchFileException is thrown which is not treated as a + // corruption Exception. In these cases we don't expect any issue on store close. 
+ nrtEngineStore.close(); + } + } + private void copySegments(Collection latestPrimaryFiles, Engine nrtEngine) throws IOException { final Store store = nrtEngine.store; final List replicaFiles = List.of(store.directory().listAll()); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 2afd6773b15d4..054d3956596af 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -66,6 +66,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck("index", b -> b.field("index", false)); checker.registerConflictCheck("store", b -> b.field("store", true)); checker.registerConflictCheck("format", b -> b.field("format", "yyyy-MM-dd")); + checker.registerConflictCheck("print_format", b -> b.field("print_format", "yyyy-MM-dd")); checker.registerConflictCheck("locale", b -> b.field("locale", "es")); checker.registerConflictCheck("null_value", b -> b.field("null_value", "34500000")); checker.registerUpdateCheck(b -> b.field("ignore_malformed", true), m -> assertTrue(((DateFieldMapper) m).getIgnoreMalformed())); @@ -148,7 +149,7 @@ public void testStore() throws Exception { public void testIgnoreMalformed() throws IOException { testIgnoreMalformedForValue( "2016-03-99", - "failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]" + "failed to parse date field [2016-03-99] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); testIgnoreMalformedForValue("-522000000", "long overflow"); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index 37d4a53f36878..ab53ae81ab0ce 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -107,7 +107,7 @@ public void isFieldWithinRangeTestCase(DateFieldType ft) throws IOException { w.addDocument(doc); DirectoryReader reader = DirectoryReader.open(w); - DateMathParser alternateFormat = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); + DateMathParser alternateFormat = DateFieldMapper.getDefaultDateTimeFormatter().toDateMathParser(); doTestIsFieldWithinQuery(ft, reader, null, null); doTestIsFieldWithinQuery(ft, reader, null, alternateFormat); doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null); @@ -158,7 +158,7 @@ private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, public void testValueFormat() { MappedFieldType ft = new DateFieldType("field"); - long instant = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12T14:10:55")) + long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2015-10-12T14:10:55")) .toInstant() .toEpochMilli(); @@ -167,14 +167,14 @@ public void testValueFormat() { assertEquals("2015", new DateFieldType("field").docValueFormat("YYYY", ZoneOffset.UTC).format(instant)); assertEquals(instant, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", false, null)); assertEquals(instant + 999, ft.docValueFormat(null, 
ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", true, null)); - long i = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-13")).toInstant().toEpochMilli(); + long i = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2015-10-13")).toInstant().toEpochMilli(); assertEquals(i - 1, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12||/d", true, null)); } public void testValueForSearch() { MappedFieldType ft = new DateFieldType("field"); String date = "2015-10-12T12:09:55.000Z"; - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); + long instant = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(date); assertEquals(date, ft.valueForDisplay(instant)); } @@ -205,7 +205,7 @@ public void testTermQuery() { ); MappedFieldType ft = new DateFieldType("field"); String date = "2015-10-12T14:10:55"; - long instant = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)).toInstant().toEpochMilli(); + long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)).toInstant().toEpochMilli(); Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant, instant + 999), SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999) @@ -217,7 +217,7 @@ public void testTermQuery() { false, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap() @@ -254,8 +254,8 @@ public void testRangeQuery() throws IOException { MappedFieldType ft = new DateFieldType("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli(); - long instant2 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999; + long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); + long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant1, instant2), SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) @@ -280,7 +280,7 @@ public void testRangeQuery() throws IOException { false, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap() @@ -326,8 +326,8 @@ public void testRangeQueryWithIndexSort() { MappedFieldType ft = new DateFieldType("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli(); - long instant2 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999; + long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); + long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; Query pointQuery = LongPoint.newRangeQuery("field", instant1, instant2); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2); diff --git 
a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index f3f682697a930..9a0d34c916f5c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -1097,8 +1097,8 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception { @Override protected boolean forbidPrivateIndexSettings() { - /** - * This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. + /* + This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. */ return false; } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java index 4ebb160c07c8e..331bfb7b2ddf4 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java @@ -361,7 +361,12 @@ public void testSerializeDefaults() throws Exception { // if type is date_range we check that the mapper contains the default format and locale // otherwise it should not contain a locale or format - assertTrue(got, got.contains("\"format\":\"strict_date_optional_time||epoch_millis\"") == type.equals("date_range")); + assertTrue( + got, + got.contains("\"format\":\"strict_date_time_no_millis||strict_date_optional_time||epoch_millis\"") == type.equals( + "date_range" + ) + ); assertTrue(got, got.contains("\"locale\":" + "\"" + Locale.ROOT + "\"") == type.equals("date_range")); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index 668666a53cd7c..755d77c6ae392 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -265,7 +265,9 @@ public void testDateRangeQueryUsingMappingFormat() { ); assertThat( ex.getMessage(), - containsString("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]") + containsString( + "failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" + ) ); // setting mapping format which is compatible with those dates diff --git a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java index 729205c9775b4..7c7598e5dc3c4 100644 --- a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java @@ -109,7 +109,7 @@ public void testSerialization() throws Exception { /** * Test that if we serialize and deserialize an object, further * serialization leads to identical bytes representation. - * + *

                            * This is necessary to ensure because we use the serialized BytesReference * of this builder as part of the cacheKey in * {@link ShardSearchRequest} (via diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 0d66654a70f08..62337264bc0b1 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -66,6 +66,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class NestedQueryBuilderTests extends AbstractQueryTestCase { @@ -411,4 +413,22 @@ public void testDisallowExpensiveQueries() { OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext)); assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testSetParentFilterInContext() throws Exception { + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilderTests().createTestQueryBuilder()); + when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> { + QueryShardContext context = invoke.getArgument(0); + if (context.getParentFilter() == null) { + throw new Exception("Expect parent filter to be non-null"); + } + return invoke.callRealMethod(); + }); + NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values())); + + assertNull(queryShardContext.getParentFilter()); + nqb.rewrite(queryShardContext).toQuery(queryShardContext); + assertNull(queryShardContext.getParentFilter()); + verify(innerQueryBuilder).toQuery(queryShardContext); + } } diff --git a/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java new file mode 100644 index 0000000000000..18b814aec61c2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.opensearch.test.OpenSearchTestCase; + +import static org.junit.Assert.assertEquals; + +public final class QueryShapeVisitorTests extends OpenSearchTestCase { + public void testQueryShapeVisitor() { + QueryBuilder builder = new BoolQueryBuilder().must(new TermQueryBuilder("foo", "bar")) + .filter(new ConstantScoreQueryBuilder(new RangeQueryBuilder("timestamp").from("12345677").to("2345678"))) + .should( + new BoolQueryBuilder().must(new MatchQueryBuilder("text", "this is some text")) + .mustNot(new RegexpQueryBuilder("color", "red.*")) + ) + .must(new TermsQueryBuilder("genre", "action", "drama", "romance")); + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + builder.visit(shapeVisitor); + assertEquals( + "{\"type\":\"bool\",\"must\"[{\"type\":\"term\"},{\"type\":\"terms\"}],\"filter\"[{\"type\":\"constant_score\",\"filter\"[{\"type\":\"range\"}]}],\"should\"[{\"type\":\"bool\",\"must\"[{\"type\":\"match\"}],\"must_not\"[{\"type\":\"regexp\"}]}]}", + shapeVisitor.toJson() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 50f785bdf4a34..e72be29b85b63 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -87,8 +87,8 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { ZonedDateTime start = now.minusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); ZonedDateTime end = now.plusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); query = new RangeQueryBuilder(randomFrom(DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, DATE_ALIAS_FIELD_NAME)); - query.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(start)); - query.to(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(end)); + query.from(DateFieldMapper.getDefaultDateTimeFormatter().format(start)); + query.to(DateFieldMapper.getDefaultDateTimeFormatter().format(end)); // Create timestamp option only then we have a date mapper, // otherwise we could trigger exception. 
if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index 0bf00f9e48137..c87cdfcc8f1a1 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -23,6 +23,8 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; + public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; private ClusterService clusterService; @@ -92,7 +94,7 @@ public void testUpdateLocalRefreshTimeMs() { directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long refreshTimeMs = System.nanoTime() / 1_000_000L + randomIntBetween(10, 100); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); transferTracker.updateLocalRefreshTimeMs(refreshTimeMs); assertEquals(refreshTimeMs, transferTracker.getLocalRefreshTimeMs()); } @@ -103,7 +105,7 @@ public void testUpdateRemoteRefreshTimeMs() { directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long refreshTimeMs = System.nanoTime() / 1_000_000 + randomIntBetween(10, 100); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); transferTracker.updateRemoteRefreshTimeMs(refreshTimeMs); assertEquals(refreshTimeMs, transferTracker.getRemoteRefreshTimeMs()); } @@ -133,20 +135,29 @@ public void testComputeSeqNoLagOnUpdate() { assertEquals(localRefreshSeqNo - remoteRefreshSeqNo, transferTracker.getRefreshSeqNoLag()); } - public void testComputeTimeLagOnUpdate() { + public void testComputeTimeLagOnUpdate() throws InterruptedException { transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long currentLocalRefreshTimeMs = transferTracker.getLocalRefreshTimeMs(); - long currentTimeMs = System.nanoTime() / 1_000_000L; - long localRefreshTimeMs = currentTimeMs + randomIntBetween(100, 500); - long remoteRefreshTimeMs = currentTimeMs + randomIntBetween(50, 99); - transferTracker.updateLocalRefreshTimeMs(localRefreshTimeMs); - assertEquals(localRefreshTimeMs - currentLocalRefreshTimeMs, transferTracker.getTimeMsLag()); - transferTracker.updateRemoteRefreshTimeMs(remoteRefreshTimeMs); - assertEquals(localRefreshTimeMs - remoteRefreshTimeMs, transferTracker.getTimeMsLag()); + + // No lag if there is a remote upload corresponding to a local refresh + assertEquals(0, transferTracker.getTimeMsLag()); + + // Set a local refresh time that is higher than remote refresh time + Thread.sleep(1); + transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + + // Sleep for 100ms and then the lag should be within 100ms +/- 20ms + Thread.sleep(100); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - 100) <= 20); + + transferTracker.updateRemoteRefreshTimeMs(transferTracker.getLocalRefreshTimeMs()); + transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + long random = randomIntBetween(50, 200); + Thread.sleep(random); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - random) <= 20); } 
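Note (reviewer annotation, not part of the patch): the lag assertions above compare values produced by RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos(), the static helper imported at the top of this test file, rather than wall-clock time. A minimal Java sketch of such a helper follows; its body is an assumption inferred from the System.nanoTime() / 1_000_000L expressions this diff replaces, not a copy of the production implementation.

final class MonotonicClockSketch {
    private MonotonicClockSketch() {}

    // Milliseconds derived from the JVM's monotonic nano clock. Values are only
    // meaningful relative to each other (e.g. for computing refresh/upload lag)
    // and must not be compared against System.currentTimeMillis().
    static long currentTimeMsUsingSystemNanos() {
        return java.util.concurrent.TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    }
}

Using a monotonic source keeps the +/- 20 ms tolerances in these tests stable even if the wall clock is adjusted while they run.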
public void testAddUploadBytesStarted() { @@ -519,7 +530,7 @@ public void testStatsObjectCreation() { transferTracker = constructTracker(); RemoteSegmentTransferTracker.Stats transferTrackerStats = transferTracker.stats(); assertEquals(transferTracker.getShardId(), transferTrackerStats.shardId); - assertEquals(transferTracker.getTimeMsLag(), (int) transferTrackerStats.refreshTimeLagMs); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - transferTrackerStats.refreshTimeLagMs) <= 20); assertEquals(transferTracker.getLocalRefreshSeqNo(), (int) transferTrackerStats.localRefreshNumber); assertEquals(transferTracker.getRemoteRefreshSeqNo(), (int) transferTrackerStats.remoteRefreshNumber); assertEquals(transferTracker.getBytesLag(), (int) transferTrackerStats.bytesLag); @@ -591,9 +602,9 @@ private RemoteSegmentTransferTracker constructTracker() { ); transferTracker.incrementTotalUploadsStarted(); transferTracker.incrementTotalUploadsFailed(); - transferTracker.updateUploadTimeMovingAverage(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); + transferTracker.updateUploadTimeMovingAverage(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100)); transferTracker.updateUploadBytesMovingAverage(99); - transferTracker.updateRemoteRefreshTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); + transferTracker.updateRemoteRefreshTimeMs(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100)); transferTracker.incrementRejectionCount(); transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(10); transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(10, System.currentTimeMillis()); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index ada8d9983aa3d..cb77174e612fd 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -21,8 +21,11 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.IntStream; +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexShard; public class RemoteStorePressureServiceTests extends OpenSearchTestCase { @@ -58,7 +61,7 @@ public void tearDown() throws Exception { public void testIsSegmentsUploadBackpressureEnabled() { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY, remoteStoreStatsTrackerFactory); - assertFalse(pressureService.isSegmentsUploadBackpressureEnabled()); + assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); Settings newSettings = Settings.builder() .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), "true") @@ -68,7 +71,7 @@ public void testIsSegmentsUploadBackpressureEnabled() { assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); } - public void testValidateSegmentUploadLag() { + public void testValidateSegmentUploadLag() throws InterruptedException { // Create the pressure tracker IndexShard indexShard = createIndexShard(shardId, true); remoteStoreStatsTrackerFactory = new 
RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); @@ -86,14 +89,27 @@ public void testValidateSegmentUploadLag() { sum.addAndGet(i); }); double avg = (double) sum.get() / 20; - long currentMs = System.nanoTime() / 1_000_000; - pressureTracker.updateLocalRefreshTimeMs((long) (currentMs + 12 * avg)); - pressureTracker.updateRemoteRefreshTimeMs(currentMs); - Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); - assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); - assertTrue(e.getMessage().contains("time_lag:114 ms dynamic_time_lag_threshold:95.0 ms")); - pressureTracker.updateRemoteRefreshTimeMs((long) (currentMs + 2 * avg)); + // We run this to ensure that the local and remote refresh time are not same anymore + while (pressureTracker.getLocalRefreshTimeMs() == currentTimeMsUsingSystemNanos()) { + Thread.sleep(10); + } + long localRefreshTimeMs = currentTimeMsUsingSystemNanos(); + pressureTracker.updateLocalRefreshTimeMs(localRefreshTimeMs); + + while (currentTimeMsUsingSystemNanos() - localRefreshTimeMs <= 20 * avg) { + Thread.sleep((long) (4 * avg)); + } + Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); + String regex = "^rejected execution on primary shard:\\[index]\\[0] due to remote segments lagging behind " + + "local segments.time_lag:[0-9]{2,3} ms dynamic_time_lag_threshold:95\\.0 ms$"; + Pattern pattern = Pattern.compile(regex); + Matcher matcher = pattern.matcher(e.getMessage()); + assertTrue(matcher.matches()); + + pressureTracker.updateRemoteRefreshTimeMs(pressureTracker.getLocalRefreshTimeMs()); + pressureTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + Thread.sleep((long) (2 * avg)); pressureService.validateSegmentsUploadLag(shardId); // 2. 
bytes lag more than dynamic threshold diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java index f5514b8936a2f..064c6c10eba02 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java @@ -48,7 +48,7 @@ public void testGetDefaultSettings() { ); // Check remote refresh segment pressure enabled is false - assertFalse(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); + assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); // Check bytes lag variance threshold default value assertEquals(10.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 9afa75dd601b2..d3c7d754d6b61 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,10 +8,85 @@ package org.opensearch.index.remote; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; + public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + + private final String oldMetadataFilename = getOldSegmentMetadataFilename(12, 23, 34, 1, 1); + + /* + Gives segment metadata filename for <2.11 version + */ + public static String getOldSegmentMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion + ) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(translogGeneration), + RemoteStoreUtils.invertLong(uploadCounter), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + + public static String getOldTranslogMetadataFilename(long primaryTerm, long generation, int metadataVersion) { + return String.join( + METADATA_SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + 
RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + public void testInvertToStrInvalid() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong(-1)); } @@ -60,4 +135,48 @@ public void testGetSegmentNameUnderscoreDelimiterOverrides() { public void testGetSegmentNameException() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.getSegmentName("dvd")); } + + public void testVerifyMultipleWriters_Segment() { + List mdFiles = new ArrayList<>(); + mdFiles.add(metadataFilename); + mdFiles.add(metadataFilename2); + mdFiles.add(oldMetadataFilename); + verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + + mdFiles.add(metadataFilenameDup); + assertThrows( + IllegalStateException.class, + () -> verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen) + ); + } + + public void testVerifyMultipleWriters_Translog() throws InterruptedException { + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename = tm.getFileName(); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename2 = tm2.getFileName(); + List bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + bmList.add(new PlainBlobMetadata(getOldTranslogMetadataFilename(1, 1, 1), 1)); + RemoteStoreUtils.verifyNoMultipleWriters( + bmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); + + bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + TranslogTransferMetadata tm3 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); + bmList.add(new PlainBlobMetadata(tm3.getFileName(), 1)); + List finalBmList = bmList; + assertThrows( + IllegalStateException.class, + () -> RemoteStoreUtils.verifyNoMultipleWriters( + finalBmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ) + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index d9bca55a208c2..63a9ac2f2e8ec 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -125,7 +125,8 @@ public void testRetentionLeaseSyncActionOnPrimary() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -162,7 +163,8 @@ public void testRetentionLeaseSyncActionOnReplica() throws Exception { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final 
RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -203,7 +205,8 @@ public void testBlocks() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertNull(action.indexBlockLevel()); @@ -233,7 +236,8 @@ private RetentionLeaseSyncAction createAction() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index b2eb41828a4df..fa3cf7676f55c 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2745,6 +2745,7 @@ public void testRelocatedForRemoteTranslogBackedIndexWithAsyncDurability() throw AllocationId.newRelocation(routing.allocationId()) ); IndexShardTestCase.updateRoutingEntry(indexShard, routing); + indexDoc(indexShard, "_doc", "0"); assertTrue(indexShard.isSyncNeeded()); try { indexShard.relocated(routing.getTargetRelocatingShard().allocationId().getId(), primaryContext -> {}, () -> {}); @@ -4910,6 +4911,8 @@ private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker track tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); tracker.addUploadBytesFailed(10L); + tracker.incrementRejectionCount(); + tracker.incrementRejectionCount(); } private void populateSampleRemoteTranslogStats(RemoteTranslogTransferTracker tracker) { @@ -4943,5 +4946,7 @@ private static void assertRemoteSegmentStats( assertEquals(remoteSegmentTransferTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); assertEquals(remoteSegmentTransferTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); assertEquals(remoteSegmentTransferTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertTrue(remoteSegmentStats.getTotalRejections() > 0); + assertEquals(remoteSegmentTransferTracker.getRejectionCount(), remoteSegmentStats.getTotalRejections()); } } diff --git a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java index afe6e47bec7b2..a45b25f04060b 100644 --- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java @@ -133,7 +133,8 @@ public void setupListeners() throws Exception { shardId, createTempDir("translog"), indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); Engine.EventListener eventListener = new Engine.EventListener() { @Override diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java new file mode 100644 index 0000000000000..21bf580712761 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed 
under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.test.CorruptionUtils; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.stream.Stream; + +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class RemoteIndexShardCorruptionTests extends IndexShardTestCase { + + public void testLocalDirectoryContains() throws IOException { + IndexShard indexShard = newStartedShard(true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } + flushShard(indexShard); + indexShard.store().incRef(); + Directory localDirectory = indexShard.store().directory(); + Path shardPath = indexShard.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + Path tempDir = createTempDir(); + for (String file : localDirectory.listAll()) { + if (file.equals("write.lock") || file.startsWith("extra")) { + continue; + } + boolean corrupted = randomBoolean(); + long checksum = 0; + try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { + checksum = CodecUtil.retrieveChecksum(indexInput); + } + if (corrupted) { + Files.copy(shardPath.resolve(file), tempDir.resolve(file)); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + } + if (corrupted == false) { + assertTrue(indexShard.localDirectoryContains(localDirectory, file, checksum)); + } else { + assertFalse(indexShard.localDirectoryContains(localDirectory, file, checksum)); + assertFalse(Files.exists(shardPath.resolve(file))); + } + } + try (Stream files = Files.list(tempDir)) { + files.forEach(p -> { + try { + Files.copy(p, shardPath.resolve(p.getFileName())); + } catch (IOException e) { + // Ignore + } + }); + } + FileSystemUtils.deleteSubDirectories(tempDir); + indexShard.store().decRef(); + closeShards(indexShard); + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 6a99063d11353..20cec90d79e3e 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -31,18 +31,21 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.CorruptionUtils; import org.hamcrest.MatcherAssert; import org.junit.Assert; import java.io.IOException; +import java.nio.channels.FileChannel; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; import 
java.util.stream.Collectors; import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; @@ -359,6 +362,7 @@ public void testPrimaryRestart() throws Exception { * prevent FileAlreadyExistsException. It does so by only copying files in first round of segment replication without * committing locally so that in next round of segment replication those files are not considered for download again */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); @@ -370,36 +374,9 @@ public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - Runnable[] runAfterGetFiles = { () -> { throw new RuntimeException("Simulated"); }, () -> {} }; - AtomicInteger index = new AtomicInteger(0); - RemoteStoreReplicationSource testRSReplicationSource = new RemoteStoreReplicationSource(replica) { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - super.getCheckpointMetadata(replicationId, checkpoint, listener); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - ActionListener listener - ) { - super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, listener); - runAfterGetFiles[index.getAndIncrement()].run(); - } - - @Override - public String getDescription() { - return "TestRemoteStoreReplicationSource"; - } - }; - when(sourceFactory.get(any())).thenReturn(testRSReplicationSource); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); CountDownLatch latch = new CountDownLatch(1); // Start first round of segment replication. 
This should fail with simulated error but with replica having @@ -410,6 +387,7 @@ public String getDescription() { new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { + latch.countDown(); Assert.fail("Replication should fail with simulated error"); } @@ -419,9 +397,9 @@ public void onReplicationFailure( ReplicationFailedException e, boolean sendShardFailure ) { + latch.countDown(); assertFalse(sendShardFailure); logger.error("Replication error", e); - latch.countDown(); } } ); @@ -437,7 +415,8 @@ public void onReplicationFailure( assertEquals("Files should be copied to disk", false, onDiskFiles.isEmpty()); assertEquals(target.state().getStage(), SegmentReplicationState.Stage.GET_FILES); - // Start next round of segment replication + // Start next round of segment replication and not throwing exception resulting in commit on replica + when(sourceFactory.get(any())).thenReturn(getRemoteStoreReplicationSource(replica, () -> {})); CountDownLatch waitForSecondRound = new CountDownLatch(1); final SegmentReplicationTarget newTarget = targetService.startReplication( replica, @@ -454,9 +433,9 @@ public void onReplicationFailure( ReplicationFailedException e, boolean sendShardFailure ) { + waitForSecondRound.countDown(); logger.error("Replication error", e); Assert.fail("Replication should not fail"); - waitForSecondRound.countDown(); } } ); @@ -469,6 +448,129 @@ public void onReplicationFailure( } } + /** + * This test validates that local non-readable (corrupt, partially) on disk are deleted vs failing the + * replication event. This test mimics local files (not referenced by reader) by throwing exception post file copy and + * blocking update of reader. Once this is done, it corrupts one segment file and ensure that file is deleted in next + * round of segment replication by ensuring doc count. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") + public void testNoFailuresOnFileReads() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int docCount = 10; + shards.indexDocs(docCount); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); + CountDownLatch waitOnReplicationCompletion = new CountDownLatch(1); + + // Start first round of segment replication. This should fail with simulated error but with replica having + // files in its local store but not in active reader. 
+ SegmentReplicationTarget segmentReplicationTarget = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + waitOnReplicationCompletion.countDown(); + Assert.fail("Replication should fail with simulated error"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + waitOnReplicationCompletion.countDown(); + assertFalse(sendShardFailure); + } + } + ); + waitOnReplicationCompletion.await(); + assertBusy(() -> { assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); }); + String fileToCorrupt = null; + // Corrupt one data file + Path shardPath = replica.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + for (String file : replica.store().directory().listAll()) { + if (file.equals("write.lock") || file.startsWith("extra") || file.startsWith("segment")) { + continue; + } + fileToCorrupt = file; + logger.info("--> Corrupting file {}", fileToCorrupt); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + break; + } + Assert.assertNotNull(fileToCorrupt); + + // Ingest more data and start next round of segment replication + shards.indexDocs(docCount); + primary.refresh("Post corruption"); + replicateSegments(primary, List.of(replica)); + + assertDocCount(primary, 2 * docCount); + assertDocCount(replica, 2 * docCount); + + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + + // clean up + shards.removeReplica(replica); + closeShards(replica); + } + } + + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { + return new RemoteStoreReplicationSource(shard) { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + super.getCheckpointMetadata(replicationId, checkpoint, listener); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + BiConsumer fileProgressTracker, + ActionListener listener + ) { + super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, (fileName, bytesRecovered) -> {}, listener); + postGetFilesRunnable.run(); + } + + @Override + public String getDescription() { + return "TestRemoteStoreReplicationSource"; + } + }; + } + + @Override + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. 
+ assertFalse(primary.isSearchIdleSupported()); + assertTrue(primary.isSearchIdle()); + assertTrue(primary.scheduledRefresh()); + assertFalse(primary.hasRefreshPending()); + } + private void assertSingleSegmentFile(IndexShard shard, String fileName) throws IOException { final Set segmentsFileNames = Arrays.stream(shard.store().directory().listAll()) .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 415efd4ac23b6..51814283c5eb3 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicLong; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.mockito.ArgumentMatchers.any; @@ -138,7 +139,8 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { return Collections.singletonList("dummy string"); } throw new IOException(); - }).when(remoteMetadataDirectory).listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, 1); + }).when(remoteMetadataDirectory) + .listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH); SegmentInfos segmentInfos; try (Store indexShardStore = indexShard.store()) { @@ -166,7 +168,10 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { // Validate that the stream of metadata file of remoteMetadataDirectory has been opened only once and the // listFilesByPrefixInLexicographicOrder has been called twice. 
verify(remoteMetadataDirectory, times(1)).getBlobStream(any()); - verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, 1); + verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + METADATA_FILES_TO_FETCH + ); } public void testAfterRefresh() throws IOException { @@ -515,8 +520,8 @@ private Tuple mockIn if (counter.incrementAndGet() <= succeedOnAttempt) { throw new RuntimeException("Inducing failure in upload"); } - return indexShard.getLatestSegmentInfosAndCheckpoint(); - })).when(shard).getLatestSegmentInfosAndCheckpoint(); + return indexShard.getLatestReplicationCheckpoint(); + })).when(shard).computeReplicationCheckpoint(any()); doAnswer(invocation -> { if (Objects.nonNull(successLatch)) { @@ -579,4 +584,5 @@ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentSto } } } + } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 9c8f9896850c6..c88c86d51be08 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -58,7 +58,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineCreationFailureException; import org.opensearch.index.engine.InternalEngineFactory; @@ -134,7 +134,7 @@ public void setup() throws IOException { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 528402d48658a..7caff3e5f5479 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -91,6 +91,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Function; import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; @@ -435,13 +436,17 @@ public void testShardIdleWithNoReplicas() throws Exception { shards.startAll(); final IndexShard primary = shards.getPrimary(); shards.indexDocs(randomIntBetween(1, 10)); - // ensure search idle conditions are met. - assertTrue(primary.isSearchIdle()); - assertFalse(primary.scheduledRefresh()); - assertTrue(primary.hasRefreshPending()); + validateShardIdleWithNoReplicas(primary); } } + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. 
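+ // without remote store, the idle shard skips the scheduled refresh and keeps a refresh pending.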
+ assertTrue(primary.isSearchIdle()); + assertFalse(primary.scheduledRefresh()); + assertTrue(primary.hasRefreshPending()); + } + /** * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. */ @@ -725,6 +730,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { // set the listener, we will only fail it once we cancel the source. @@ -923,6 +929,33 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { } } + public void testReuseReplicationCheckpointWhenLatestInfosIsUnChanged() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + final ReplicationCheckpoint latestReplicationCheckpoint = primaryShard.getLatestReplicationCheckpoint(); + try (GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(segmentInfosSnapshot.get())); + } + final Tuple, ReplicationCheckpoint> latestSegmentInfosAndCheckpoint = primaryShard + .getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable closeable = latestSegmentInfosAndCheckpoint.v1()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(closeable.get())); + } + } + } + + public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + assertEquals(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard.computeReplicationCheckpoint(null)); + } + } + private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(threadPool); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java index c394101697b47..f0950fe5392de 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java @@ -47,6 +47,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -87,6 +88,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { // randomly resolve the listener, indicating the source has resolved. 
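// the newly added fileProgressTracker argument is not used by this stub.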
@@ -131,6 +133,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Should not be reached"); @@ -176,6 +179,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Unreachable"); @@ -223,6 +227,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) {} }; @@ -269,6 +274,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index 3740abc57b02d..9e38e1749d434 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -29,7 +29,6 @@ import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -41,11 +40,13 @@ import org.mockito.Mockito; +import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -204,13 +205,29 @@ public void testCreateOutput() { public void testOpenInput() throws IOException { InputStream mockInputStream = mock(InputStream.class); when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); - Map fileInfo = new HashMap<>(); - fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); - when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); IndexInput indexInput = remoteDirectory.openInput("segment_1", IOContext.DEFAULT); assertTrue(indexInput instanceof RemoteIndexInput); assertEquals(100, indexInput.length()); + verify(blobContainer).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC); + } + + public void testOpenInputWithLength() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); + + IndexInput indexInput = remoteDirectory.openInput("segment_1", 100, IOContext.DEFAULT); + assertTrue(indexInput instanceof RemoteIndexInput); + assertEquals(100, indexInput.length()); + verify(blobContainer, times(0)).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC); } public void testOpenInputIOException() throws 
IOException { @@ -228,9 +245,8 @@ public void testOpenInputNoSuchFileException() throws IOException { } public void testFileLength() throws IOException { - Map fileInfo = new HashMap<>(); - fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); - when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); assertEquals(100, remoteDirectory.fileLength("segment_1")); } @@ -246,13 +262,7 @@ public void testListFilesByPrefixInLexicographicOrder() throws IOException { LatchedActionListener> latchedActionListener = invocation.getArgument(3); latchedActionListener.onResponse(List.of(new PlainBlobMetadata("metadata_1", 1))); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertEquals(List.of("metadata_1"), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } @@ -262,13 +272,7 @@ public void testListFilesByPrefixInLexicographicOrderEmpty() throws IOException LatchedActionListener> latchedActionListener = invocation.getArgument(3); latchedActionListener.onResponse(List.of()); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertEquals(List.of(), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } @@ -278,13 +282,7 @@ public void testListFilesByPrefixInLexicographicOrderException() { LatchedActionListener> latchedActionListener = invocation.getArgument(3); latchedActionListener.onFailure(new IOException("Error")); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index d7bbe52aa3905..cad5e47531cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -35,6 +35,7 @@ import org.mockito.ArgumentCaptor; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; @@ -78,7 +79,12 @@ public void testNewDirectory() throws IOException { latchedActionListener.onResponse(List.of()); return null; }).when(blobContainer) - .listBlobsByPrefixInSortedOrder(any(), eq(1), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any(ActionListener.class)); + 
.listBlobsByPrefixInSortedOrder( + any(), + eq(METADATA_FILES_TO_FETCH), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); @@ -93,7 +99,7 @@ public void testNewDirectory() throws IOException { verify(blobContainer).listBlobsByPrefixInSortedOrder( eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), - eq(1), + eq(METADATA_FILES_TO_FETCH), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any() ); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 8d99d98fbaaf4..36cfd84ff960a 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -30,7 +30,6 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; @@ -49,7 +48,6 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.NoSuchFileException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -62,11 +60,12 @@ import org.mockito.Mockito; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.hamcrest.CoreMatchers.is; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.contains; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -90,9 +89,39 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private SegmentInfos segmentInfos; private ThreadPool threadPool; - private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1); - private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1); - private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1); + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1" + ); @Before public void setup() throws 
IOException { @@ -121,6 +150,7 @@ public void setup() throws IOException { } when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); + when(threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY)).thenReturn(executorService); } @After @@ -175,15 +205,13 @@ public void testUploadedSegmentMetadataFromStringException() { } public void testGetPrimaryTermGenerationUuid() { - String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR - ); + String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split(SEPARATOR); assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens)); assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens)); } public void testInitException() throws IOException { - when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, 1)).thenThrow( + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenThrow( new IOException("Error") ); @@ -202,6 +230,13 @@ public void testInitNoMetadataFile() throws IOException { assertEquals(Set.of(), actualCache.keySet()); } + public void testInitMultipleMetadataFile() throws IOException { + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenReturn( + List.of(metadataFilename, metadataFilenameDup) + ); + assertThrows(IllegalStateException.class, () -> remoteSegmentStoreDirectory.init()); + } + private Map> populateMetadata() throws IOException { List metadataFiles = new ArrayList<>(); @@ -212,7 +247,7 @@ private Map> populateMetadata() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); when( @@ -262,7 +297,7 @@ public void testInit() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); @@ -318,7 +353,7 @@ public void testFileLength() throws IOException { assertEquals(uploadedSegments.get("_0.si").getLength(), remoteSegmentStoreDirectory.fileLength("_0.si")); } - public void testFileLenghtNoSuchFile() throws IOException { + public void testFileLengthNoSuchFile() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -347,7 +382,7 @@ public void testOpenInput() throws IOException { remoteSegmentStoreDirectory.init(); IndexInput indexInput = mock(IndexInput.class); - when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenReturn(indexInput); + when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenReturn(indexInput); assertEquals(indexInput, remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT)); } @@ -360,7 +395,7 @@ public void testOpenInputException() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); - when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error")); + 
when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error")); assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT)); } @@ -523,114 +558,6 @@ public void onFailure(Exception e) {} storeDirectory.close(); } - public void testCopyFilesToMultipart() throws Exception { - Settings settings = Settings.builder().build(); - FeatureFlags.initializeFeatureFlags(settings); - - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); - - Mockito.doAnswer(invocation -> { - ActionListener completionListener = invocation.getArgument(3); - completionListener.onResponse(invocation.getArgument(0)); - return null; - }).when(blobContainer).asyncBlobDownload(any(), any(), any(), any()); - - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener() { - @Override - public void onResponse(String unused) { - downloadLatch.countDown(); - } - - @Override - public void onFailure(Exception e) {} - }; - Path path = createTempDir(); - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.SECONDS)); - verify(blobContainer, times(1)).asyncBlobDownload(contains(filename), eq(path.resolve(filename)), any(), any()); - verify(storeDirectory, times(0)).copyFrom(any(), any(), any(), any()); - } - - public void testCopyFilesTo() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - downloadLatch.countDown(); - } - - @Override - public void onFailure(Exception e) {} - }; - Path path = createTempDir(); - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); - verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); - } - - public void testCopyFilesToEmptyPath() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); - - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - downloadLatch.countDown(); - } - - @Override - public void onFailure(Exception e) {} - }; - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, null, completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); - verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); - } - - public void testCopyFilesToException() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - 
Mockito.doThrow(new IOException()) - .when(storeDirectory) - .copyFrom(any(Directory.class), anyString(), anyString(), any(IOContext.class)); - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - - } - - @Override - public void onFailure(Exception e) { - downloadLatch.countDown(); - } - }; - Path path = createTempDir(); - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); - verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); - } - public void testCopyFilesFromMultipartIOException() throws Exception { String filename = "_100.si"; AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); @@ -693,7 +620,7 @@ public void testContainsFile() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -739,7 +666,8 @@ public void testUploadMetadataEmpty() throws IOException { segmentInfos, storeDirectory, 34L, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ) ); } @@ -757,7 +685,7 @@ public void testUploadMetadataNonEmpty() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); Map> metadataFilenameContentMapping = Map.of( @@ -785,7 +713,8 @@ public void testUploadMetadataNonEmpty() throws IOException { segInfos, storeDirectory, generation, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ); verify(remoteMetadataDirectory).copyFrom( @@ -832,7 +761,8 @@ public void testUploadMetadataMissingSegment() throws IOException { segmentInfos, storeDirectory, 12L, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ) ); verify(indexOutput).close(); @@ -855,7 +785,7 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -878,7 +808,7 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -903,7 +833,7 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -928,7 +858,7 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -953,7 +883,7 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { when( 
remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -1114,17 +1044,21 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { } public void testMetadataFileNameOrder() { - String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1); - String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1); - String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1); - String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1); - String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1); - String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1); + String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1, ""); + String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1, ""); + String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1, ""); + String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1, ""); + String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1, ""); + String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1, ""); List actualList = new ArrayList<>(List.of(file1, file2, file3, file4, file5, file6)); actualList.sort(String::compareTo); assertEquals(List.of(file3, file2, file4, file6, file5, file1), actualList); + + long count = file1.chars().filter(ch -> ch == SEPARATOR.charAt(0)).count(); + // There should not be any `_` in mdFile name as it is used a separator . + assertEquals(14, count); } private static class WrapperIndexOutput extends IndexOutput { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java new file mode 100644 index 0000000000000..6d8b3fe4d69fb --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.NIOFSDirectory; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class RemoteStoreFileDownloaderTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private Directory source; + private Directory destination; + private Directory secondDestination; + private RemoteStoreFileDownloader fileDownloader; + private Map files = new HashMap<>(); + + @Before + public void setup() throws IOException { + final int streamLimit = randomIntBetween(1, 20); + final RecoverySettings recoverySettings = new RecoverySettings( + Settings.builder().put("indices.recovery.max_concurrent_remote_store_streams", streamLimit).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + threadPool = new TestThreadPool(getTestName()); + source = new NIOFSDirectory(createTempDir()); + destination = new NIOFSDirectory(createTempDir()); + secondDestination = new NIOFSDirectory(createTempDir()); + for (int i = 0; i < 10; i++) { + final String filename = "file_" + i; + final int content = randomInt(); + try (IndexOutput output = source.createOutput(filename, IOContext.DEFAULT)) { + output.writeInt(content); + } + files.put(filename, content); + } + fileDownloader = new RemoteStoreFileDownloader( + ShardId.fromString("[RemoteStoreFileDownloaderTests][0]"), + threadPool, + recoverySettings + ); + } + + @After + public void stopThreadPool() throws Exception { + threadPool.shutdown(); + assertTrue(threadPool.awaitTermination(5, TimeUnit.SECONDS)); + } + + public void testDownload() throws IOException { + final PlainActionFuture l = new PlainActionFuture<>(); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, files.keySet(), l); + l.actionGet(); + assertContent(files, destination); + } + + public void testDownloadWithSecondDestination() throws IOException, InterruptedException { + fileDownloader.download(source, destination, secondDestination, files.keySet(), () -> {}); + assertContent(files, destination); + assertContent(files, secondDestination); + } + + public void testDownloadWithFileCompletionHandler() throws IOException, InterruptedException { + final AtomicInteger counter = new AtomicInteger(0); + 
fileDownloader.download(source, destination, null, files.keySet(), counter::incrementAndGet); + assertContent(files, destination); + assertEquals(files.size(), counter.get()); + } + + public void testDownloadNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, Set.of("not real"), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testDownloadExtraNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + final List filesWithExtra = new ArrayList<>(files.keySet()); + filesWithExtra.add("not real"); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, filesWithExtra, new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testCancellable() { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture blockingListener = new PlainActionFuture<>(); + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + fileDownloader.downloadAsync(cancellableThreads, source, blockingDestination, files.keySet(), blockingListener); + assertThrows( + "Expected to timeout due to blocking directory", + OpenSearchTimeoutException.class, + () -> blockingListener.actionGet(TimeValue.timeValueMillis(500)) + ); + cancellableThreads.cancel("test"); + assertThrows( + "Expected to complete with cancellation failure", + CancellableThreads.ExecutionCancelledException.class, + blockingListener::actionGet + ); + } + + public void testBlockingCallCanBeInterrupted() throws Exception { + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + final AtomicReference capturedException = new AtomicReference<>(); + final Thread thread = new Thread(() -> { + try { + fileDownloader.download(source, blockingDestination, null, files.keySet(), () -> {}); + } catch (Exception e) { + capturedException.set(e); + } + }); + thread.start(); + thread.interrupt(); + thread.join(); + assertEquals(InterruptedException.class, capturedException.get().getClass()); + } + + public void testIOException() throws IOException, InterruptedException { + final Directory failureDirectory = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + throw new IOException("test"); + } + }; + assertThrows(IOException.class, () -> 
fileDownloader.download(source, failureDirectory, null, files.keySet(), () -> {})); + + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, failureDirectory, files.keySet(), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(IOException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + private static void assertContent(Map expected, Directory destination) throws IOException { + // Note that Lucene will randomly write extra files (see org.apache.lucene.tests.mockfile.ExtraFS) + // so we just need to check that all the expected files are present but not that _only_ the expected + // files are present + final Set actualFiles = Set.of(destination.listAll()); + for (String file : expected.keySet()) { + assertTrue(actualFiles.contains(file)); + try (IndexInput input = destination.openInput(file, IOContext.DEFAULT)) { + assertEquals(expected.get(file), Integer.valueOf(input.readInt())); + assertThrows(EOFException.class, input::readByte); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java index f3a2f1859923e..80413d4cb6612 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java @@ -15,10 +15,24 @@ public class FileLockInfoTests extends OpenSearchTestCase { String testMetadata = "testMetadata"; String testAcquirerId = "testAcquirerId"; + String testAcquirerId2 = "ZxZ4Wh89SXyEPmSYAHrIrQ"; + String testAcquirerId3 = "ZxZ4Wh89SXyEPmSYAHrItS"; + String testMetadata1 = "metadata__9223372036854775806__9223372036854775803__9223372036854775790" + + "__9223372036854775800___Hf3Dbw2QQagfGLlVBOUrg__9223370340398865071__1"; + + String oldLock = testMetadata1 + RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR + testAcquirerId2 + + RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION; + String newLock = testMetadata1 + RemoteStoreLockManagerUtils.SEPARATOR + testAcquirerId3 + + RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION; public void testGenerateLockName() { FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withFileToLock(testMetadata).withAcquirerId(testAcquirerId).build(); assertEquals(fileLockInfo.generateLockName(), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); + + // validate that lock generated will be the new version lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withFileToLock(testMetadata1).withAcquirerId(testAcquirerId3).build(); + assertEquals(fileLockInfo.generateLockName(), newLock); + } public void testGenerateLockNameFailureCase1() { @@ -41,13 +55,33 @@ public void testGetLockPrefixFailureCase() { assertThrows(IllegalArgumentException.class, fileLockInfo::getLockPrefix); } + public void testGetFileToLockNameFromLock() { + assertEquals(testMetadata1, FileLockInfo.LockFileUtils.getFileToLockNameFromLock(oldLock)); + assertEquals(testMetadata1, FileLockInfo.LockFileUtils.getFileToLockNameFromLock(newLock)); + } + + public void testGetAcquirerIdFromLock() { + assertEquals(testAcquirerId2, FileLockInfo.LockFileUtils.getAcquirerIdFromLock(oldLock)); + assertEquals(testAcquirerId3, FileLockInfo.LockFileUtils.getAcquirerIdFromLock(newLock)); + } + public 
void testGetLocksForAcquirer() throws NoSuchFileException { + String[] locks = new String[] { FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId), - FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2") }; + FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2"), + oldLock, + newLock }; FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); - assertEquals(fileLockInfo.getLockForAcquirer(locks), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); + + // validate old lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId2).build(); + assertEquals(fileLockInfo.getLockForAcquirer(locks), oldLock); + + // validate new lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId3).build(); + assertEquals(fileLockInfo.getLockForAcquirer(locks), newLock); } } diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java index 2de36574064cb..c098d11a3487f 100644 --- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java @@ -38,7 +38,7 @@ public void testRecoveryFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -68,7 +68,7 @@ public void testRecoveryFromTranslog() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -117,7 +117,7 @@ public void testTranslogRollsGeneration() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -147,7 +147,7 @@ public void testTranslogRollsGeneration() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -182,7 +182,7 @@ public void testTrimOperationsFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new 
InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -214,7 +214,7 @@ public void testTrimOperationsFromTranslog() throws IOException { translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -253,7 +253,7 @@ public void testTranslogSync() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); AtomicReference translogManagerAtomicReference = new AtomicReference<>(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index dbfc66d6de4b3..4997067b75198 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -291,7 +291,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { @@ -1452,7 +1452,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); @@ -1550,7 +1551,8 @@ public void testTranslogWriterFsyncedWithLocalTranslog() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 233d6f319b797..3cb65610fab58 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -47,6 +47,7 @@ import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.TranslogTransferManager; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import 
org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -67,6 +68,7 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; @@ -95,6 +97,7 @@ import java.util.zip.CheckedInputStream; import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; @@ -122,6 +125,8 @@ public class RemoteFsTranslogTests extends OpenSearchTestCase { private ThreadPool threadPool; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; + + AtomicInteger writeCalls = new AtomicInteger(); BlobStoreRepository repository; BlobStoreTransferService blobStoreTransferService; @@ -161,13 +166,13 @@ public void tearDown() throws Exception { private RemoteFsTranslog create(Path path) throws IOException { final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - return create(path, createRepository(), translogUUID); + return create(path, createRepository(), translogUUID, 0); } - private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID, int extraGenToKeep) throws IOException { this.repository = repository; globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogConfig translogConfig = getTranslogConfig(path, extraGenToKeep); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); threadPool = new TestThreadPool(getClass().getName()); blobStoreTransferService = new BlobStoreTransferService(repository.blobStore(), threadPool); @@ -183,10 +188,17 @@ private RemoteFsTranslog create(Path path, BlobStoreRepository repository, Strin primaryMode::get, new RemoteTranslogTransferTracker(shardId, 10) ); + } + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { + return create(path, repository, translogUUID, 0); } private TranslogConfig getTranslogConfig(final Path path) { + return getTranslogConfig(path, 0); + } + + private TranslogConfig getTranslogConfig(final Path path, int gensToKeep) { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) // only randomize between nog age retention and a long one, so failures will have a chance of reproducing @@ -194,6 +206,7 @@ private TranslogConfig getTranslogConfig(final Path path) { .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), gensToKeep) 
.build(); return getTranslogConfig(path, settings); } @@ -206,7 +219,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private BlobStoreRepository createRepository() { @@ -370,6 +383,111 @@ public void testSimpleOperations() throws IOException { } + private TranslogConfig getConfig(int gensToKeep) { + Path tempDir = createTempDir(); + final TranslogConfig temp = getTranslogConfig(tempDir, gensToKeep); + final TranslogConfig config = new TranslogConfig( + temp.getShardId(), + temp.getTranslogPath(), + temp.getIndexSettings(), + temp.getBigArrays(), + new ByteSizeValue(1, ByteSizeUnit.KB), + "" + ); + return config; + } + + private ChannelFactory getChannelFactory() { + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = (file, openOption) -> { + FileChannel delegate = FileChannel.open(file, openOption); + boolean success = false; + try { + // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation + final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); + + final FileChannel channel; + if (isCkpFile) { + channel = delegate; + } else { + channel = new FilterFileChannel(delegate) { + + @Override + public int write(ByteBuffer src) throws IOException { + writeCalls.incrementAndGet(); + return super.write(src); + } + }; + } + success = true; + return channel; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(delegate); + } + } + }; + return channelFactory; + } + + public void testExtraGenToKeep() throws Exception { + TranslogConfig config = getConfig(1); + ChannelFactory channelFactory = getChannelFactory(); + final Set persistedSeqNos = new HashSet<>(); + String translogUUID = Translog.createEmptyTranslog( + config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + channelFactory, + primaryTerm.get() + ); + TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); + ArrayList ops = new ArrayList<>(); + try ( + RemoteFsTranslog translog = new RemoteFsTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + persistedSeqNos::add, + repository, + threadPool, + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) + ) { + @Override + ChannelFactory getChannelFactory() { + return channelFactory; + } + } + ) { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 })); + + // expose the new checkpoint (simulating a commit), before we trim the translog + translog.setMinSeqNoToKeep(2); + + // Trims from local + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 3, primaryTerm.get(), new byte[] { 1 })); + + // Trims from remote now + translog.trimUnreferencedReaders(); + assertBusy(() -> 
assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals( + 6, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + + } + } + public void testReadLocation() throws IOException { ArrayList ops = new ArrayList<>(); ArrayList locs = new ArrayList<>(); @@ -617,14 +735,22 @@ public void testSimpleOperationsUpload() throws Exception { // this should now trim as tlog-2 files from remote, but not tlog-3 and tlog-4 addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 })); assertEquals(2, translog.stats().estimatedNumberOfOperations()); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); translog.setMinSeqNoToKeep(2); - - assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + // this should now trim as tlog-2 files from remote, but not tlog-3 and tlog-4 translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertEquals(1, translog.stats().estimatedNumberOfOperations()); - assertBusy(() -> assertEquals(4, translog.allUploaded().size())); + assertBusy(() -> { + assertEquals(4, translog.allUploaded().size()); + assertEquals( + 4, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }); + } public void testMetadataFileDeletion() throws Exception { @@ -1049,7 +1175,7 @@ public void testSyncUpTo() throws IOException { } } - public void testSyncUpFailure() throws IOException { + public void testSyncUpLocationFailure() throws IOException { int translogOperations = randomIntBetween(1, 20); int count = 0; fail.failAlways(); @@ -1101,6 +1227,26 @@ public void testSyncUpFailure() throws IOException { assertDownloadStatsNoDownloads(statsTracker); } + public void testSyncUpAlwaysFailure() throws IOException { + int translogOperations = randomIntBetween(1, 20); + int count = 0; + fail.failAlways(); + for (int op = 0; op < translogOperations; op++) { + translog.add( + new Translog.Index(String.valueOf(op), count, primaryTerm.get(), Integer.toString(count).getBytes(StandardCharsets.UTF_8)) + ); + try { + translog.sync(); + fail("io exception expected"); + } catch (TranslogUploadFailedException e) { + assertTrue("at least one operation pending", translog.syncNeeded()); + } + } + assertTrue(translog.isOpen()); + fail.failNever(); + translog.sync(); + } + public void testSyncUpToStream() throws IOException { int iters = randomIntBetween(5, 10); for (int i = 0; i < iters; i++) { @@ -1251,48 +1397,10 @@ public void testTranslogWriter() throws IOException { } public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { - Path tempDir = createTempDir(); - final TranslogConfig temp = getTranslogConfig(tempDir); - final TranslogConfig config = new TranslogConfig( - temp.getShardId(), - temp.getTranslogPath(), - temp.getIndexSettings(), - temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) - ); - + final TranslogConfig config = getConfig(1); final Set persistedSeqNos = new HashSet<>(); - final AtomicInteger writeCalls = new AtomicInteger(); - - final ChannelFactory channelFactory = (file, openOption) -> { - FileChannel delegate = FileChannel.open(file, openOption); - boolean success = false; - try { - // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic 
operation - final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); - - final FileChannel channel; - if (isCkpFile) { - channel = delegate; - } else { - channel = new FilterFileChannel(delegate) { - - @Override - public int write(ByteBuffer src) throws IOException { - writeCalls.incrementAndGet(); - return super.write(src); - } - }; - } - success = true; - return channel; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(delegate); - } - } - }; - + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = getChannelFactory(); String translogUUID = Translog.createEmptyTranslog( config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, @@ -1360,7 +1468,8 @@ public void testTranslogWriterFsyncDisabledInRemoteFsTranslog() throws IOExcepti temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java index 43b4d2c9847ab..e17d2770f014a 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java @@ -74,7 +74,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 6fc4557a75675..e34bc078896f9 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -10,12 +10,14 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -34,15 +36,19 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import org.mockito.Mockito; +import static 
org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anySet; @@ -176,6 +182,93 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { assertEquals(4, fileTransferTracker.allUploaded().size()); } + public void testTransferSnapshotOnUploadTimeout() throws Exception { + doAnswer(invocationOnMock -> { + Thread.sleep(31 * 1000); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce exception = new SetOnce<>(); + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Timed out waiting for transfer of snapshot test-to-string to complete", exception.get().getMessage()); + } + + public void testTransferSnapshotOnThreadInterrupt() throws Exception { + SetOnce uploadThread = new SetOnce<>(); + doAnswer(invocationOnMock -> { + uploadThread.set(new Thread(() -> { + ActionListener listener = invocationOnMock.getArgument(2); + try { + Thread.sleep(31 * 1000); + } catch (InterruptedException ignore) { + List list = new ArrayList<>(invocationOnMock.getArgument(0)); + listener.onFailure(new FileTransferException(list.get(0), ignore)); + } + })); + uploadThread.get().start(); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce exception = new SetOnce<>(); + + Thread thread = new Thread(() -> { + try { + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + thread.start(); + + Thread.sleep(1000); + // Interrupt the thread + thread.interrupt(); + assertBusy(() -> { + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Failed to upload test-to-string", exception.get().getMessage()); + }); + uploadThread.get().interrupt(); + } + private TransferSnapshot createTransferSnapshot() { return new TransferSnapshot() { @Override @@ -228,6 +321,11 @@ public Set getTranslogFileSnapshots() { public TranslogTransferMetadata 
getTranslogTransferMetadata() { return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); } + + @Override + public String toString() { + return "test-to-string"; + } }; } @@ -251,8 +349,8 @@ public void testReadMetadataNoFile() throws IOException { assertNoDownloadStats(false); } - // This should happen most of the time - Just a single metadata file - public void testReadMetadataSingleFile() throws IOException { + // This should happen most of the time - + public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, @@ -260,12 +358,16 @@ public void testReadMetadataSingleFile() throws IOException { null, remoteTranslogTransferTracker ); - TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2); - String mdFilename = tm.getFileName(); + TranslogTransferMetadata metadata1 = new TranslogTransferMetadata(1, 1, 1, 2); + String mdFilename1 = metadata1.getFileName(); + + TranslogTransferMetadata metadata2 = new TranslogTransferMetadata(1, 0, 1, 2); + String mdFilename2 = metadata2.getFileName(); doAnswer(invocation -> { LatchedActionListener> latchedActionListener = invocation.getArgument(3); List bmList = new LinkedList<>(); - bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename1, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); latchedActionListener.onResponse(bmList); return null; }).when(transferService) @@ -273,7 +375,7 @@ public void testReadMetadataSingleFile() throws IOException { TranslogTransferMetadata metadata = createTransferSnapshot().getTranslogTransferMetadata(); long delayForMdDownload = 1; - when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename))).thenAnswer(invocation -> { + when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename1))).thenAnswer(invocation -> { Thread.sleep(delayForMdDownload); return new ByteArrayInputStream(translogTransferManager.getMetadataBytes(metadata)); }); @@ -496,4 +598,43 @@ private void assertTlogCkpDownloadStats() { // Expect delay for both tlog and ckp file assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= 2 * delayForBlobDownload); } + + public void testGetPrimaryTermAndGeneration() { + String nodeId = UUID.randomUUID().toString(); + String tm = new TranslogTransferMetadata(1, 2, 1, 2, nodeId).getFileName(); + Tuple, String> actualOutput = TranslogTransferMetadata.getNodeIdByPrimaryTermAndGeneration(tm); + assertEquals(1L, (long) (actualOutput.v1().v1())); + assertEquals(2L, (long) (actualOutput.v1().v2())); + assertEquals(String.valueOf(Objects.hash(nodeId)), actualOutput.v2()); + } + + public void testMetadataConflict() throws InterruptedException { + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + null, + remoteTranslogTransferTracker + ); + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename = tm.getFileName(); + long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count(); + // There should not be any `_` in mdFile name as it is used a separator . 
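The assertion that follows counts METADATA_SEPARATOR characters in the generated metadata file name, guarding against stray separator characters sneaking into the separated fields (the file name embeds a hash of the node ID, per the Objects.hash assertion above). As a rough, self-contained illustration of that counting check, here is a minimal sketch; the separator constant and file-name layout below are assumptions for the example, not the actual TranslogTransferMetadata format.

import java.util.Arrays;

public class MetadataFileNameSeparatorSketch {
    // Hypothetical two-character separator; the real constant lives in TranslogTransferMetadata.
    static final String SEPARATOR = "__";

    // Count occurrences of the separator's first character, mirroring the style of the test above.
    static long countSeparatorChars(String fileName) {
        return fileName.chars().filter(ch -> ch == SEPARATOR.charAt(0)).count();
    }

    public static void main(String[] args) {
        // Six illustrative components joined by five separators, so ten '_' characters in total.
        String fileName = String.join(SEPARATOR, "metadata", "12", "7", "1700000000000", "1234567", "2");
        System.out.println(countSeparatorChars(fileName));              // prints 10
        System.out.println(Arrays.toString(fileName.split(SEPARATOR))); // recovers the six components
    }
}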
+ assertEquals(10, count); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); + String mdFilename2 = tm2.getFileName(); + + doAnswer(invocation -> { + LatchedActionListener> latchedActionListener = invocation.getArgument(3); + List bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + latchedActionListener.onResponse(bmList); + return null; + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); + + assertThrows(RuntimeException.class, translogTransferManager::readMetadata); + } } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 11d916616578d..ad90255a3cc3f 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -54,7 +54,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -168,7 +168,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index bcacef83d190a..e4dd32e5c6f70 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -125,6 +125,7 @@ public void testGetSegmentFiles() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -153,6 +154,7 @@ public void testTransportTimeoutForGetSegmentFilesAction() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -178,6 +180,7 @@ public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedExcep checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, new ActionListener<>() { @Override public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java 
b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java index 9204f48ba5bdd..287962b158c79 100644 --- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java @@ -90,7 +90,7 @@ public void testGetSegmentFiles() throws ExecutionException, InterruptedExceptio List filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, res); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, (fileName, bytesRecovered) -> {}, res); GetSegmentFilesResponse response = res.get(); assertEquals(response.files.size(), filesToFetch.size()); assertTrue(response.files.containsAll(filesToFetch)); @@ -104,7 +104,14 @@ public void testGetSegmentFilesAlreadyExists() throws IOException, InterruptedEx try { final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, primaryShard, res); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + filesToFetch, + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); res.get(); } catch (AssertionError | ExecutionException ex) { latch.countDown(); @@ -118,7 +125,14 @@ public void testGetSegmentFilesReturnEmptyResponse() throws ExecutionException, final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), primaryShard, res); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Collections.emptyList(), + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); GetSegmentFilesResponse response = res.get(); assert (response.files.isEmpty()); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index c1f88a6938d33..7b02635525264 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; @@ -38,6 +39,7 @@ import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -51,6 +53,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; @@ -210,6 +213,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Should not be called"); @@ -245,7 +249,7 @@ public void testAlreadyOnNewCheckpoint() { verify(spy, times(0)).startReplication(any(), any(), any()); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8928") + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testShardAlreadyReplicating() { CountDownLatch blockGetCheckpointMetadata = new CountDownLatch(1); SegmentReplicationSource source = new TestReplicationSource() { @@ -275,6 +279,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); @@ -332,6 +337,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Unreachable"); @@ -553,7 +559,7 @@ public void testForceSegmentSyncHandlerWithFailure() throws Exception { ).txGet(); }); Throwable nestedException = finalizeException.getCause().getCause(); - assertTrue(nestedException instanceof IOException); + assertNotNull(ExceptionsHelper.unwrap(finalizeException, IOException.class)); assertTrue(nestedException.getMessage().contains("dummy failure")); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 2596dd6e62026..8b4b3aff701b4 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -40,6 +40,7 @@ import org.opensearch.index.store.StoreTests; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; @@ -53,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.function.BiConsumer; import org.mockito.Mockito; @@ -131,10 +133,12 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { assertEquals(1, filesToFetch.size()); assert (filesToFetch.contains(SEGMENT_FILE)); + filesToFetch.forEach(storeFileMetadata -> fileProgressTracker.accept(storeFileMetadata.name(), storeFileMetadata.length())); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -149,6 +153,19 @@ public void getSegmentFiles( public void onResponse(Void replicationResponse) { try { verify(spyIndexShard, times(1)).finalizeReplication(any()); + assertEquals( + 1, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + 
.filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); + assertEquals( + 0, + segrepTarget.state().getIndex().fileDetails().stream().filter(file -> file.fullyRecovered() == false).count() + ); segrepTarget.markAsDone(); } catch (IOException ex) { Assert.fail(); @@ -182,6 +199,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -200,6 +218,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -225,6 +252,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onFailure(exception); @@ -243,13 +271,22 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } }); } - public void testFailure_finalizeReplication_IOException() throws IOException { + public void testFailure_finalizeReplication_NonCorruptionException() throws IOException { IOException exception = new IOException("dummy failure"); SegmentReplicationSource segrepSource = new TestReplicationSource() { @@ -268,6 +305,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -288,6 +326,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals(ReplicationFailedException.class, e.getClass()); assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -313,6 +352,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -357,6 +397,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -375,6 +416,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertTrue(e instanceof OpenSearchCorruptionException); assertTrue(e.getMessage().contains("has local copies of segments that differ from the primary")); segrepTarget.fail(new ReplicationFailedException(e), false); @@ -409,6 +459,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, 
ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); diff --git a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java index 0fd039b84e887..0059f8e215f2e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java @@ -46,7 +46,12 @@ public void testDeviceStats() { final int sectorsRead = randomIntBetween(8 * readsCompleted, 16 * readsCompleted); final int writesCompleted = randomIntBetween(1, 1 << 16); final int sectorsWritten = randomIntBetween(8 * writesCompleted, 16 * writesCompleted); - + final int readTime = randomIntBetween(1, 1 << 16); + ; + final int writeTime = randomIntBetween(1, 1 << 16); + ; + final int queueSize = randomIntBetween(1, 1 << 16); + final int ioTime = randomIntBetween(1, 1 << 16); FsInfo.DeviceStats previous = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -55,6 +60,10 @@ public void testDeviceStats() { sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, null ); FsInfo.DeviceStats current = new FsInfo.DeviceStats( @@ -65,6 +74,10 @@ public void testDeviceStats() { sectorsRead + 16384, writesCompleted + 2048, sectorsWritten + 32768, + readTime + 500, + writeTime + 100, + queueSize + 20, + ioTime + 8192, previous ); assertThat(current.operations(), equalTo(1024L + 2048L)); @@ -72,6 +85,10 @@ public void testDeviceStats() { assertThat(current.writeOperations(), equalTo(2048L)); assertThat(current.readKilobytes(), equalTo(16384L / 2)); assertThat(current.writeKilobytes(), equalTo(32768L / 2)); + assertEquals(500, current.readTime()); + assertEquals(100, current.writeTime()); + assertEquals(20, current.queueSize()); + assertEquals(8192, current.ioTimeInMillis()); } } diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java index 686a624d988d7..59a888c665be7 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java @@ -91,6 +91,14 @@ public void testFsInfo() throws IOException { assertThat(deviceStats.previousWritesCompleted, equalTo(-1L)); assertThat(deviceStats.currentSectorsWritten, greaterThanOrEqualTo(0L)); assertThat(deviceStats.previousSectorsWritten, equalTo(-1L)); + assertThat(deviceStats.currentReadTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousReadTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentWriteTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousWriteTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentQueueSize, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousQueueSize, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentIOTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousIOTime, greaterThanOrEqualTo(-1L)); } } else { assertNull(stats.getIoStats()); @@ -243,6 +251,16 @@ List readProcDiskStats() throws IOException { assertThat(first.devicesStats[0].previousWritesCompleted, equalTo(-1L)); assertThat(first.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(first.devicesStats[0].previousSectorsWritten, equalTo(-1L)); + + assertEquals(33457, first.devicesStats[0].currentReadTime); + assertEquals(-1, first.devicesStats[0].previousReadTime); + assertEquals(18730966, 
first.devicesStats[0].currentWriteTime); + assertEquals(-1, first.devicesStats[0].previousWriteTime); + assertEquals(18767169, first.devicesStats[0].currentQueueSize); + assertEquals(-1, first.devicesStats[0].previousQueueSize); + assertEquals(1918440, first.devicesStats[0].currentIOTime); + assertEquals(-1, first.devicesStats[0].previousIOTime); + assertThat(first.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(first.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(first.devicesStats[1].deviceName, equalTo("dm-2")); @@ -255,6 +273,15 @@ List readProcDiskStats() throws IOException { assertThat(first.devicesStats[1].currentSectorsWritten, equalTo(64126096L)); assertThat(first.devicesStats[1].previousSectorsWritten, equalTo(-1L)); + assertEquals(49312, first.devicesStats[1].currentReadTime); + assertEquals(-1, first.devicesStats[1].previousReadTime); + assertEquals(33730596, first.devicesStats[1].currentWriteTime); + assertEquals(-1, first.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1, first.devicesStats[1].previousIOTime); + diskStats.set( Arrays.asList( " 259 0 nvme0n1 336870 0 7928397 82876 10264393 0 182986405 52451610 0 2971042 52536492", @@ -281,6 +308,16 @@ List readProcDiskStats() throws IOException { assertThat(second.devicesStats[0].previousWritesCompleted, equalTo(8398869L)); assertThat(second.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(second.devicesStats[0].previousSectorsWritten, equalTo(118857776L)); + + assertEquals(33464, second.devicesStats[0].currentReadTime); + assertEquals(33457, second.devicesStats[0].previousReadTime); + assertEquals(18730966, second.devicesStats[0].currentWriteTime); + assertEquals(18730966, second.devicesStats[0].previousWriteTime); + assertEquals(18767176, second.devicesStats[0].currentQueueSize); + assertEquals(18767169, second.devicesStats[0].previousQueueSize); + assertEquals(1918444, second.devicesStats[0].currentIOTime); + assertEquals(1918440, second.devicesStats[0].previousIOTime); + assertThat(second.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(second.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(second.devicesStats[1].deviceName, equalTo("dm-2")); @@ -293,11 +330,25 @@ List readProcDiskStats() throws IOException { assertThat(second.devicesStats[1].currentSectorsWritten, equalTo(64128568L)); assertThat(second.devicesStats[1].previousSectorsWritten, equalTo(64126096L)); + assertEquals(49369, second.devicesStats[1].currentReadTime); + assertEquals(49312, second.devicesStats[1].previousReadTime); + assertEquals(33730766, second.devicesStats[1].currentWriteTime); + assertEquals(33730596, second.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1L, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1L, first.devicesStats[1].previousIOTime); + assertThat(second.totalOperations, equalTo(575L)); assertThat(second.totalReadOperations, equalTo(261L)); assertThat(second.totalWriteOperations, equalTo(314L)); assertThat(second.totalReadKilobytes, equalTo(2392L)); assertThat(second.totalWriteKilobytes, equalTo(1236L)); + + assertEquals(64, second.totalReadTime); + assertEquals(170, second.totalWriteTime); + assertEquals(236, second.totalQueueSize); 
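The totals and per-device values asserted here are per-interval deltas computed from two cumulative /proc/diskstats samples (current minus previous, with -1 marking a missing previous sample), summed across devices for the totals; for example, the total read time of 64 is (33464 - 33457) + (49369 - 49312). A minimal sketch of that delta arithmetic follows; the class and field names are illustrative only, not the FsInfo.DeviceStats API.

// Minimal sketch of interval deltas from two cumulative disk-stat samples.
// Names are illustrative; the real implementation is FsInfo.DeviceStats.
public final class DiskStatDeltaSketch {
    final long readTimeMs; // cumulative milliseconds spent reading
    final long ioTimeMs;   // cumulative milliseconds with I/O in flight

    DiskStatDeltaSketch(long readTimeMs, long ioTimeMs) {
        this.readTimeMs = readTimeMs;
        this.ioTimeMs = ioTimeMs;
    }

    // Difference between two cumulative counters; the real class also handles the
    // case of no earlier sample, which the tests mark with -1.
    static long delta(long current, long previous) {
        return current - previous;
    }

    public static void main(String[] args) {
        DiskStatDeltaSketch previous = new DiskStatDeltaSketch(33457, 1918440);
        DiskStatDeltaSketch current = new DiskStatDeltaSketch(33464, 1918444);
        System.out.println(delta(current.readTimeMs, previous.readTimeMs)); // 7
        System.out.println(delta(current.ioTimeMs, previous.ioTimeMs));     // 4
    }
}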
+ assertEquals(158, second.totalIOTimeInMillis); } public void testAdjustForHugeFilesystems() throws Exception { diff --git a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java new file mode 100644 index 0000000000000..b2fa884afab69 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests for ResourceUsageCollectorService where we test collect method, get method and whether schedulers + * are working as expected + */ +public class ResourceUsageCollectorServiceTests extends OpenSearchSingleNodeTestCase { + + private ClusterService clusterService; + private ResourceUsageCollectorService collector; + private ThreadPool threadpool; + NodeResourceUsageTracker tracker; + + @Before + public void setUp() throws Exception { + super.setUp(); + + threadpool = new TestThreadPool("resource_usage_collector_tests"); + + clusterService = createClusterService(threadpool); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + tracker = new NodeResourceUsageTracker( + threadpool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + collector = new ResourceUsageCollectorService(tracker, clusterService, threadpool); + tracker.start(); + collector.start(); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadpool.shutdownNow(); + clusterService.close(); + collector.stop(); + tracker.stop(); + collector.close(); + tracker.close(); + } + + public void testResourceUsageStats() { + collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99); + Map nodeStats = collector.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertEquals(99.0, nodeStats.get("node1").cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeStats.get("node1").memoryUtilizationPercent, 0.0); + + Optional nodeResourceUsageStatsOptional = 
collector.getNodeStatistics("node1"); + + assertNotNull(nodeResourceUsageStatsOptional.get()); + assertEquals(99.0, nodeResourceUsageStatsOptional.get().cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeResourceUsageStatsOptional.get().memoryUtilizationPercent, 0.0); + + nodeResourceUsageStatsOptional = collector.getNodeStatistics("node2"); + assertTrue(nodeResourceUsageStatsOptional.isEmpty()); + } + + public void testScheduler() throws Exception { + /** + * Wait for cluster state to be ready so that localNode().getId() is ready and we add the values to the map + */ + assertBusy(() -> assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()), 1, TimeUnit.MINUTES); + assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()); + /** + * Wait for memory utilization to be reported greater than 0 + */ + assertBusy( + () -> assertThat( + collector.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(), + greaterThan(0.0) + ), + 5, + TimeUnit.SECONDS + ); + assertTrue(collector.getNodeStatistics("Invalid").isEmpty()); + } + + /* + * Test that concurrently adding values and removing nodes does not cause exceptions + */ + public void testConcurrentAddingAndRemovingNodes() throws Exception { + String[] nodes = new String[] { "a", "b", "c", "d" }; + + final CountDownLatch latch = new CountDownLatch(5); + + Runnable f = () -> { + latch.countDown(); + try { + latch.await(); + } catch (InterruptedException e) { + fail("should not be interrupted"); + } + for (int i = 0; i < randomIntBetween(100, 200); i++) { + if (randomBoolean()) { + collector.removeNodeResourceUsageStats(randomFrom(nodes)); + } + collector.collectNodeResourceUsageStats( + randomFrom(nodes), + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100) + ); + } + }; + + Thread t1 = new Thread(f); + Thread t2 = new Thread(f); + Thread t3 = new Thread(f); + Thread t4 = new Thread(f); + + t1.start(); + t2.start(); + t3.start(); + t4.start(); + latch.countDown(); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + + final Map nodeStats = collector.getAllNodeStatistics(); + for (String nodeId : nodes) { + if (nodeStats.containsKey(nodeId)) { + assertThat(nodeStats.get(nodeId).memoryUtilizationPercent, greaterThan(0.0)); + assertThat(nodeStats.get(nodeId).cpuUtilizationPercent, greaterThan(0.0)); + } + } + } + + public void testNodeRemoval() { + collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + collector.collectNodeResourceUsageStats("node2", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + + ClusterState previousState = ClusterState.builder(new ClusterName("cluster")) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node1")) + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9201), "node2")) + ) + .build(); + ClusterState newState = ClusterState.builder(previousState) + .nodes(DiscoveryNodes.builder(previousState.nodes()).remove("node2")) + .build(); + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, previousState); + + collector.clusterChanged(event); + final Map nodeStats = collector.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertFalse(nodeStats.containsKey("node2")); + } +} diff --git 
a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java new file mode 100644 index 0000000000000..374c993a264d4 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +/** + * Tests to validate AverageMemoryUsageTracker and AverageCpuUsageTracker implementation + */ +public class AverageUsageTrackerTests extends OpenSearchTestCase { + ThreadPool threadPool; + AverageMemoryUsageTracker averageMemoryUsageTracker; + AverageCpuUsageTracker averageCpuUsageTracker; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + averageMemoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + averageCpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + public void testBasicUsage() { + + assertAverageUsageStats(averageMemoryUsageTracker); + assertAverageUsageStats(averageCpuUsageTracker); + } + + public void testUpdateWindowSize() { + assertUpdateWindowSize(averageMemoryUsageTracker); + assertUpdateWindowSize(averageCpuUsageTracker); + } + + private void assertAverageUsageStats(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + } + + private void assertUpdateWindowSize(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + + usageTracker.setWindowSize(new TimeValue(2000, TimeUnit.MILLISECONDS)); + assertEquals(0, usageTracker.getWindowSize()); + assertEquals(0.0, usageTracker.getAverage(), 0.0); + // verify 2000/500 = 4 is the window size and average is calculated on window size of 4 + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(4, usageTracker.getWindowSize()); + // (1 + 2 + 1 + 2 ) / 4 = 1.5 + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + // ( 2 + 1 + 2 + 2 ) / 4 = 1.75 + assertEquals(1.75, 
usageTracker.getAverage(), 0.0); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java new file mode 100644 index 0000000000000..1ce68b9f29062 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests to assert resource usage trackers retrieving resource utilization averages + */ +public class NodeResourceUsageTrackerTests extends OpenSearchSingleNodeTestCase { + ThreadPool threadPool; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testStats() throws Exception { + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + tracker.start(); + /** + * Asserting memory utilization to be greater than 0 + * cpu percent used is mostly 0, so skipping assertion for that + */ + assertBusy(() -> assertThat(tracker.getMemoryUtilizationPercent(), greaterThan(0.0)), 5, TimeUnit.SECONDS); + tracker.stop(); + tracker.close(); + } + + public void testUpdateSettings() { + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + assertEquals(tracker.getResourceTrackerSettings().getCpuWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getMemoryWindowDuration().getSeconds(), 30); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "10s") + .build(); + ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + assertEquals( + "10s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + + Settings jvmsettings = Settings.builder() + 
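The AverageUsageTracker tests above rely on a bounded window whose size is the window duration divided by the polling interval (2000 ms / 500 ms = 4 in the comments), with the average taken over the most recent window-size samples. A minimal, dependency-free sketch of that bounded moving average follows; the class name and API are illustrative, not the tracker's actual interface.

import java.util.ArrayDeque;
import java.util.Deque;

// Minimal sketch of a bounded moving average over the most recent N samples.
public final class BoundedAverageSketch {
    private final int windowSize;
    private final Deque<Long> samples = new ArrayDeque<>();

    BoundedAverageSketch(int windowSize) {
        this.windowSize = windowSize;
    }

    void recordUsage(long value) {
        samples.addLast(value);
        if (samples.size() > windowSize) {
            samples.removeFirst(); // evict the oldest sample once the window is full
        }
    }

    boolean isReady() {
        return samples.size() >= windowSize; // ready once the window has filled up, as in the tests
    }

    double getAverage() {
        return samples.stream().mapToLong(Long::longValue).average().orElse(0.0);
    }

    public static void main(String[] args) {
        BoundedAverageSketch tracker = new BoundedAverageSketch(2);
        tracker.recordUsage(1);
        tracker.recordUsage(2);
        System.out.println(tracker.getAverage()); // (1 + 2) / 2 = 1.5
        tracker.recordUsage(5);
        System.out.println(tracker.getAverage()); // (2 + 5) / 2 = 3.5, the oldest sample was evicted
    }
}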
.put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "5s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(jvmsettings).get(); + assertEquals( + "5s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java new file mode 100644 index 0000000000000..bac4eaf3fd677 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CPUBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.List; + +public class AdmissionControlServiceTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + private AdmissionControlService admissionControlService; + private String action = ""; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + action = "indexing"; + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testWhenAdmissionControllerRegistered() { + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + } + + public void testRegisterInvalidAdmissionController() { + String test = "TEST"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> admissionControlService.registerAdmissionController(test) + ); + assertEquals(ex.getMessage(), "Not Supported AdmissionController : " + test); + } + + public void testAdmissionControllerSettings() { + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + AdmissionControlSettings admissionControlSettings = admissionControlService.admissionControlSettings; + List admissionControllerList = admissionControlService.getAdmissionControllers(); + 
assertEquals(admissionControllerList.size(), 1); + CPUBasedAdmissionController cpuBasedAdmissionController = (CPUBasedAdmissionController) admissionControlService + .getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals( + admissionControlSettings.isTransportLayerAdmissionControlEnabled(), + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals( + admissionControlSettings.isTransportLayerAdmissionControlEnabled(), + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + + Settings newSettings = Settings.builder() + .put(settings) + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(newSettings); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue( + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + } + + public void testApplyAdmissionControllerDisabled() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService.applyTransportAdmissionControl(this.action); + List admissionControllerList = admissionControlService.getAdmissionControllers(); + admissionControllerList.forEach(admissionController -> { assertEquals(admissionController.getRejectionCount(), 0); }); + } + + public void testApplyAdmissionControllerEnabled() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService.applyTransportAdmissionControl(this.action); + assertEquals( + admissionControlService.getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(), + 0 + ); + + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + admissionControlService.applyTransportAdmissionControl(this.action); + List admissionControllerList = admissionControlService.getAdmissionControllers(); + assertEquals(admissionControllerList.size(), 1); + assertEquals( + admissionControlService.getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(), + 1 + ); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java new file mode 100644 index 0000000000000..c11ee1cc608f6 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.List; +import java.util.Set; + +public class AdmissionControlSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the admission controller settings should be supported built in settings", + settings.containsAll(List.of(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE)) + ); + } + + public void testDefaultSettings() { + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + assertEquals(admissionControlSettings.getAdmissionControlTransportLayerMode().getMode(), AdmissionControlSettings.Defaults.MODE); + } + + public void testGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) + .build(); + + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } + + public void testUpdateAfterGetDefaultSettings() { + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } + + public void testUpdateAfterGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), 
AdmissionControlMode.MONITOR.getMode()) + .build(); + + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + + Settings newSettings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) + .build(); + + clusterService.getClusterSettings().applySettings(newSettings); + + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java new file mode 100644 index 0000000000000..af6ec0749e709 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +public class CPUBasedAdmissionControllerTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + CPUBasedAdmissionController admissionController = null; + + String action = "TEST_ACTION"; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testCheckDefaultParameters() { + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + Settings.EMPTY, + clusterService.getClusterSettings() + ); + assertEquals(admissionController.getName(), CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertFalse( + admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + } + + public void testCheckUpdateSettings() { + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + Settings.EMPTY, + clusterService.getClusterSettings() + ); + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), 
+ AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + + assertEquals(admissionController.getName(), CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + } + + public void testApplyControllerWithDefaultSettings() { + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + Settings.EMPTY, + clusterService.getClusterSettings() + ); + assertEquals(admissionController.getRejectionCount(), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action); + assertEquals(admissionController.getRejectionCount(), 0); + } + + public void testApplyControllerWhenSettingsEnabled() { + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + admissionController = new CPUBasedAdmissionController( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, + settings, + clusterService.getClusterSettings() + ); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + assertEquals(admissionController.getRejectionCount(), 0); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action); + assertEquals(admissionController.getRejectionCount(), 1); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java new file mode 100644 index 0000000000000..98c0f3c7cf24c --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import org.opensearch.test.OpenSearchTestCase; + +public class AdmissionControlModeTests extends OpenSearchTestCase { + + public void testValidActionType() { + assertEquals(AdmissionControlMode.DISABLED.getMode(), "disabled"); + assertEquals(AdmissionControlMode.ENFORCED.getMode(), "enforced"); + assertEquals(AdmissionControlMode.MONITOR.getMode(), "monitor_only"); + assertEquals(AdmissionControlMode.fromName("disabled"), AdmissionControlMode.DISABLED); + assertEquals(AdmissionControlMode.fromName("enforced"), AdmissionControlMode.ENFORCED); + assertEquals(AdmissionControlMode.fromName("monitor_only"), AdmissionControlMode.MONITOR); + } + + public void testInValidActionType() { + String name = "TEST"; + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> AdmissionControlMode.fromName(name)); + assertEquals(ex.getMessage(), "Invalid AdmissionControlMode: " + name); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java new file mode 100644 index 0000000000000..02f582c26f54e --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import org.opensearch.test.OpenSearchTestCase; + +public class TransportActionTypeTests extends OpenSearchTestCase { + + public void testValidActionType() { + assertEquals(TransportActionType.SEARCH.getType(), "search"); + assertEquals(TransportActionType.INDEXING.getType(), "indexing"); + assertEquals(TransportActionType.fromName("search"), TransportActionType.SEARCH); + assertEquals(TransportActionType.fromName("indexing"), TransportActionType.INDEXING); + } + + public void testInValidActionType() { + String name = "test"; + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> TransportActionType.fromName(name)); + assertEquals(ex.getMessage(), "Not Supported TransportAction Type: " + name); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java new file mode 100644 index 0000000000000..43103926a69a2 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java @@ -0,0 +1,153 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Set; + +public class CPUBasedAdmissionControlSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the cpu based admission controller settings should be supported built in settings", + settings.containsAll( + Arrays.asList( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT + ) + ) + ); + } + + public void testDefaultSettings() { + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + long percent = 95; + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportActionsList(), Arrays.asList("indexing", "search")); + } + + public void testGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + } + + public void testUpdateAfterGetDefaultSettings() { + long percent = 95; + long searchPercent = 80; + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + 
CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + } + + public void testUpdateAfterGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + long searchPercent = 80; + Settings settings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + + Settings updatedSettings = Settings.builder() + .put( + CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + + searchPercent = 70; + + updatedSettings = Settings.builder() + .put(updatedSettings) + .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java new file mode 100644 index 0000000000000..03d4819a94045 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class AdmissionControlTransportHandlerTests extends OpenSearchTestCase { + AdmissionControlTransportHandler admissionControlTransportHandler; + + public void testHandlerInvoked() throws Exception { + String action = "TEST"; + InterceptingRequestHandler handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler( + action, + handler, + mock(AdmissionControlService.class), + false + ); + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + assertEquals(1, handler.count); + } + + public void testHandlerInvokedRejectedException() throws Exception { + String action = "TEST"; + AdmissionControlService admissionControlService = mock(AdmissionControlService.class); + doThrow(new OpenSearchRejectedExecutionException()).when(admissionControlService).applyTransportAdmissionControl(action); + InterceptingRequestHandler handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler( + action, + handler, + admissionControlService, + false + ); + try { + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } catch (OpenSearchRejectedExecutionException exception) { + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } + assertEquals(1, handler.count); + } + + public void testHandlerInvokedRandomException() throws Exception { + String action = "TEST"; + AdmissionControlService admissionControlService = mock(AdmissionControlService.class); + doThrow(new NullPointerException()).when(admissionControlService).applyTransportAdmissionControl(action); + InterceptingRequestHandler handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler( + action, + handler, + admissionControlService, + false + ); + try { + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } catch (Exception exception) { + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } + assertEquals(1, handler.count); + } + + private class InterceptingRequestHandler implements TransportRequestHandler { + private final String action; + public int count; + + public InterceptingRequestHandler(String action) { + this.action = action; + this.count = 0; + } + + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + this.count = this.count + 1; + } + } +} diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 
889d0dc6ddb14..43ebb86fd5342 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -94,6 +94,7 @@ import java.util.function.Consumer; import java.util.function.Function; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -201,6 +202,13 @@ public void testRegisterRejectsInvalidRepositoryNames() { } } + public void testUpdateOrRegisterRejectsForSystemRepository() { + String repoName = "name"; + PutRepositoryRequest request = new PutRepositoryRequest(repoName); + request.settings(Settings.builder().put(SYSTEM_REPOSITORY_SETTING.getKey(), true).build()); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + } + public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { String repoName = "name"; expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); @@ -211,20 +219,17 @@ public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", emptyState(), clusterStateWithRepoTypeA)); - assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(0)); ClusterState clusterStateWithRepoTypeB = createClusterStateWithRepo(repoName, MeteredRepositoryTypeB.TYPE); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); List repositoriesStats = repositoriesService.repositoriesStats(); - assertThat(repositoriesStats.size(), equalTo(2)); + assertThat(repositoriesStats.size(), equalTo(1)); RepositoryStatsSnapshot repositoryStatsTypeA = repositoriesStats.get(0); - assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeA.TYPE)); - assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeA.STATS)); + assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); + assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); - RepositoryStatsSnapshot repositoryStatsTypeB = repositoriesStats.get(1); - assertThat(repositoryStatsTypeB.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); - assertThat(repositoryStatsTypeB.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); } public void testWithSameKeyProviderNames() { @@ -250,7 +255,7 @@ public void testWithSameKeyProviderNames() { kpTypeA ); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); - assertThat(repositoriesService.repositoriesStats().size(), equalTo(2)); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); MeteredRepositoryTypeB repositoryB = (MeteredRepositoryTypeB) repositoriesService.repository("repoName"); assertNotNull(repositoryB); assertNotNull(repository.cryptoHandler); @@ -310,22 +315,22 @@ public void testRepositoryUpdateWithDifferentCryptoMetadata() { assertNotNull(repository.cryptoHandler); assertEquals(kpTypeA, repository.cryptoHandler.kpType); - expectThrows(IllegalArgumentException.class, () 
-> repositoriesService.registerRepository(request, null)); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); cryptoSettings.keyProviderType(kpTypeA); cryptoSettings.settings(Settings.builder().put("key-1", "val-1")); request.cryptoSettings(cryptoSettings); - expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); cryptoSettings.settings(Settings.builder()); cryptoSettings.keyProviderName("random"); - expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); cryptoSettings.keyProviderName(keyProviderName); assertEquals(kpTypeA, repository.cryptoHandler.kpType); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); } public void testCryptoManagerClusterStateChanges() { @@ -355,7 +360,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); assertEquals(kpTypeA, repository.cryptoHandler.kpType); @@ -375,7 +380,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); @@ -397,7 +402,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); assertEquals(kpTypeA, repository.cryptoHandler.kpType); @@ -418,7 +423,7 @@ public void testCryptoManagerClusterStateChanges() { verified, repositoryMetadata ); - repositoriesService.registerRepository(request, null); + repositoriesService.registerOrUpdateRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); assertNotNull(repository.cryptoHandler); assertEquals(kpTypeB, repository.cryptoHandler.kpType); @@ -530,7 +535,7 @@ private ClusterState emptyState() { private void assertThrowsOnRegister(String repoName) { PutRepositoryRequest request = new PutRepositoryRequest(repoName); - expectThrows(RepositoryException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); } private static class TestCryptoProvider implements CryptoHandler { @@ -851,14 +856,7 @@ private static class MeteredRepositoryTypeA extends MeteredBlobStoreRepository { private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clusterService) { - super( - metadata, - false, - 
mock(NamedXContentRegistry.class), - clusterService, - mock(RecoverySettings.class), - Map.of("bucket", "bucket-a") - ); + super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-a")); if (metadata.cryptoMetadata() != null) { cryptoHandler = new TestCryptoProvider( @@ -892,14 +890,7 @@ private static class MeteredRepositoryTypeB extends MeteredBlobStoreRepository { private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clusterService) { - super( - metadata, - false, - mock(NamedXContentRegistry.class), - clusterService, - mock(RecoverySettings.class), - Map.of("bucket", "bucket-b") - ); + super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-b")); if (metadata.cryptoMetadata() != null) { cryptoHandler = new TestCryptoProvider( diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java index cf0b06a3f7d16..da0cbcb1d4b17 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; @@ -122,14 +121,11 @@ private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repository private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repositoryStats, long clusterVersion) { RepositoryInfo repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), randomAlphaOfLength(10), randomAlphaOfLength(10), - Map.of("bucket", randomAlphaOfLength(10)), - System.currentTimeMillis(), - null + Map.of("bucket", randomAlphaOfLength(10)) ); - return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion, true); + return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion); } } diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 4e60cddd14d73..57c126b85ff70 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -66,7 +66,9 @@ protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String rem BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { - return Arrays.stream(lockDirectory.listAll()).filter(lock -> lock.endsWith(".lock")).toArray(String[]::new); + return Arrays.stream(lockDirectory.listAll()) + .filter(lock -> lock.endsWith(".lock") || lock.endsWith(".v2_lock")) + .toArray(String[]::new); } } diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index 
e3e1bf31e82dc..9cca495cced72 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -145,7 +145,7 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final SnapshotId snapshotId1 = snapshotInfo.snapshotId(); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 0) : "there should be no lock files present in directory, but found " + Arrays.toString(lockFiles); + assertEquals("there should be no lock files present in directory, but found " + Arrays.toString(lockFiles), 0, lockFiles.length); logger.info("--> create remote index shallow snapshot"); Settings snapshotRepoSettingsForShallowCopy = Settings.builder() .put(snapshotRepoSettings) @@ -161,8 +161,8 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final SnapshotId snapshotId2 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId2.getUUID() + ".v2_lock")); logger.info("--> create another normal snapshot"); updateRepository(client, snapshotRepositoryName, snapshotRepoSettings); @@ -174,8 +174,8 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final SnapshotId snapshotId3 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId2.getUUID() + ".v2_lock")); logger.info("--> make sure the node's repository can resolve the snapshots"); final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); @@ -230,8 +230,8 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { final SnapshotId snapshotId = snapshotInfo.snapshotId(); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock"); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId.getUUID() + ".v2_lock")); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); @@ -305,8 +305,8 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId1 = snapshotInfo.snapshotId(); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "lock files are " + 
Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId1.getUUID() + ".lock"); + assertEquals("lock files are " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId1.getUUID() + ".v2_lock")); logger.info("--> create second remote index shallow snapshot"); snapshotInfo = createSnapshot( @@ -317,10 +317,10 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId2 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); + assertEquals("lock files are " + Arrays.toString(lockFiles), 2, lockFiles.length); List shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2); for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID()); + assertTrue(lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID())); } logger.info("--> create third remote index shallow snapshot"); snapshotInfo = createSnapshot( @@ -331,12 +331,14 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId3 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3); + assertEquals(3, lockFiles.length); shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); + assertTrue( + lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()) + ); } logger.info("--> create normal snapshot"); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); @@ -348,12 +350,14 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId4 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3) : "lock files are " + Arrays.toString(lockFiles); + assertEquals("lock files are " + Arrays.toString(lockFiles), 3, lockFiles.length); shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); + assertTrue( + lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()) + ); } logger.info("--> make sure the node's repository can resolve the snapshots"); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index e097c7025e4fe..9c65ad32fa6a6 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -252,7 +252,7 @@ public void testBadChunksize() throws Exception { ); } - public void testFsRepositoryCompressDeprecated() { + 
public void testFsRepositoryCompressDeprecatedIgnored() { final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final Settings settings = Settings.builder().put(node().settings()).put("location", location).build(); final RepositoryMetadata metadata = new RepositoryMetadata("test-repo", REPO_TYPE, settings); @@ -265,10 +265,7 @@ public void testFsRepositoryCompressDeprecated() { new FsRepository(metadata, useCompressEnvironment, null, BlobStoreTestUtil.mockClusterService(), null); - assertWarnings( - "[repositories.fs.compress] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version." - ); + assertNoDeprecationWarnings(); } private static void writeIndexGen(BlobStoreRepository repository, RepositoryData repositoryData, long generation) throws Exception { diff --git a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java index 303f60283f69f..d9f599714805b 100644 --- a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java @@ -58,6 +58,7 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.shard.ShardId; @@ -70,6 +71,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; @@ -90,6 +92,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.is; public class FsRepositoryTests extends OpenSearchTestCase { @@ -218,6 +221,31 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { } } + public void testRestrictedSettingsDefault() { + Path repo = createTempDir(); + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .put("location", repo) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + RepositoryMetadata metadata = new RepositoryMetadata("test", "fs", settings); + FsRepository repository = new FsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + + List> restrictedSettings = repository.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(4)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(FsRepository.LOCATION_SETTING)); + } + 
private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); threadPool.generic().submit(() -> { diff --git a/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java new file mode 100644 index 0000000000000..db2cf9c3e9582 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.fs; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.compress.ZstdCompressor; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.file.Path; +import java.util.Locale; + +public class ReloadableFsRepositoryTests extends OpenSearchTestCase { + ReloadableFsRepository repository; + RepositoryMetadata metadata; + Settings settings; + Path repo; + + @Override + public void setUp() throws Exception { + super.setUp(); + + repo = createTempDir(); + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", false) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + repository = new ReloadableFsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + } + + /** + * Validates that {@link ReloadableFsRepository} supports inplace reloading + */ + public void testIsReloadable() { + assertTrue(repository.isReloadable()); + } + + /** + * Updates repository metadata of an existing repository to enable default compressor + */ + public void testCompressReload() { + assertEquals(CompressorRegistry.none(), repository.getCompressor()); + updateCompressionTypeToDefault(); + repository.validateMetadata(metadata); + repository.reload(metadata); + assertEquals(CompressorRegistry.defaultCompressor(), repository.getCompressor()); + } + + /** + * Updates repository metadata of an existing repository to change compressor type from default to Zstd + */ + public void testCompressionTypeReload() { + assertEquals(CompressorRegistry.none(), repository.getCompressor()); + updateCompressionTypeToDefault(); + repository = new ReloadableFsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + 
BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + assertEquals(CompressorRegistry.defaultCompressor(), repository.getCompressor()); + + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", true) + .put("compression_type", ZstdCompressor.NAME.toLowerCase(Locale.ROOT)) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + repository.validateMetadata(metadata); + repository.reload(metadata); + assertEquals(CompressorRegistry.getCompressor(ZstdCompressor.NAME.toUpperCase(Locale.ROOT)), repository.getCompressor()); + } + + private void updateCompressionTypeToDefault() { + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", true) + .put("compression_type", DeflateCompressor.NAME.toLowerCase(Locale.ROOT)) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + } +} diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index 4bbb03322b1f8..2661873d9498f 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -321,9 +321,9 @@ public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { // deleteall DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[0])); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -489,8 +489,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * assert without point in time + /* + assert without point in time */ assertThat( @@ -509,8 +509,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * using point in time id will have the same search results as ones before update + /* + using point in time id will have the same search results as ones before update */ assertThat( client().prepareSearch() diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java index 728b7162bf478..ce96623ea06df 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java @@ -135,7 +135,7 @@ private ValuesSourceConfig getVSConfig( indexed, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap() @@ -184,7 +184,7 @@ public void testShortcutIsApplicable() throws IOException { assertNull(pointReaderShim(mockSearchContext(null), null, getVSConfig("number", resolution, false, context))); } // Check that we decode a dates "just like" the doc values instance. - Instant expected = Instant.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2020-01-01T00:00:00Z")); + Instant expected = Instant.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2020-01-01T00:00:00Z")); byte[] scratch = new byte[8]; LongPoint.encodeDimension(DateFieldMapper.Resolution.MILLISECONDS.convert(expected), scratch, 0); assertThat( diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java index 5a9a9e2b6cb51..d6ba4eedd3a19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java @@ -43,7 +43,7 @@ /** * Mock scripts shared by DateRangeIT and DateHistogramIT. - * + *
<p>
                            * Provides {@link DateScriptMocksPlugin#EXTRACT_FIELD}, {@link DateScriptMocksPlugin#DOUBLE_PLUS_ONE_MONTH}, * and {@link DateScriptMocksPlugin#LONG_PLUS_ONE_MONTH} scripts. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java index aa51f9b11ea19..39054f15826f0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java @@ -44,7 +44,7 @@ /** * Duplicates the tests from {@link CompositeAggregationBuilderTests}, except using the deprecated * interval on date histo. Separated to make testing the warnings easier. - * + *
<p>
                            * Can be removed in when the legacy interval options are gone */ public class LegacyIntervalCompositeAggBuilderTests extends BaseAggregationTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java index f5f4f3d4b0723..fdbc0160e51a3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java @@ -125,7 +125,7 @@ protected final DateFieldMapper.DateFieldType aggregableDateFieldType(boolean us isSearchable, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), useNanosecondResolution ? DateFieldMapper.Resolution.NANOSECONDS : DateFieldMapper.Resolution.MILLISECONDS, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 4e8dc20c465a9..bca6623e66104 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -1245,7 +1245,7 @@ private void testSearchCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } private static long asLong(String dateTime, DateFieldMapper.DateFieldType fieldType) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java index 08adc6e3d550a..5c12d070824f2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java @@ -1086,7 +1086,7 @@ private void testCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } private static ZonedDateTime asZDT(String dateTime) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java index 26ccc1075220b..96c8be1a25cc3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java @@ -273,7 +273,7 @@ private void testCase( true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap() diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java index 761615ad1d7eb..dd7ae915c3b45 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java @@ -136,7 +136,7 @@ public void testDateFieldNanosecondResolution() throws IOException { true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFieldMapper.Resolution.NANOSECONDS, null, Collections.emptyMap() @@ -167,7 +167,7 @@ public void testMissingDateWithDateField() throws IOException { true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFieldMapper.Resolution.NANOSECONDS, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java index e29eb95a52cd7..b4dbc8017bc3b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java @@ -45,7 +45,7 @@ /** * Provides a number of dummy scripts for tests. - * + *
<p>
                            * Each script provided allows for an {@code inc} parameter which will * be added to each value read from a document. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index 3073cb6d35ddf..de213a154c3c5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -80,15 +80,15 @@ public class AvgBucketAggregatorTests extends AggregatorTestCase { /** * Test for issue #30608. Under the following circumstances: - * + *
<p>
                            * A. Multi-bucket agg in the first entry of our internal list * B. Regular agg as the immediate child of the multi-bucket in A * C. Regular agg with the same name as B at the top level, listed as the second entry in our internal list * D. Finally, a pipeline agg with the path down to B - * + *
<p>
                            * BucketMetrics reduction would throw a class cast exception due to bad subpathing. This test ensures * it is fixed. - * + *
<p>
                            * Note: we have this test inside of the `avg_bucket` package so that we can get access to the package-private * `reduce()` needed for testing this */ @@ -144,6 +144,6 @@ public void testSameAggNames() throws IOException { } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index 17846234a09d1..5d7636208bd70 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -344,6 +344,6 @@ private void executeTestCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java index 3ae00efaa6da3..d6abe50ea5201 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java @@ -169,6 +169,6 @@ private void executeTestCase(Query query, DateHistogramAggregationBuilder aggBui } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index f0d930c4c3acb..9778798b706f4 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -249,10 +249,11 @@ public void testSearchTaskInFlightCancellation() { verify(mockTaskManager, times(10)).cancelTaskAndDescendants(any(), anyString(), anyBoolean(), any()); assertEquals(3, service.getSearchBackpressureState(SearchTask.class).getLimitReachedCount()); - // Verify search backpressure stats. + // Verify search backpressure stats. Since we are not marking any task as completed the completionCount will be 0 + // for SearchTaskStats here. 
SearchBackpressureStats expectedStats = new SearchBackpressureStats( - new SearchTaskStats(10, 3, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(10))), - new SearchShardTaskStats(0, 0, Collections.emptyMap()), + new SearchTaskStats(10, 3, 0, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(10))), + new SearchShardTaskStats(0, 0, 0, Collections.emptyMap()), SearchBackpressureMode.ENFORCED ); SearchBackpressureStats actualStats = service.nodeStats(); @@ -323,10 +324,11 @@ public void testSearchShardTaskInFlightCancellation() { verify(mockTaskManager, times(12)).cancelTaskAndDescendants(any(), anyString(), anyBoolean(), any()); assertEquals(3, service.getSearchBackpressureState(SearchShardTask.class).getLimitReachedCount()); - // Verify search backpressure stats. + // Verify search backpressure stats. We are marking 20 SearchShardTasks as completed this should get + // reflected in SearchShardTaskStats. SearchBackpressureStats expectedStats = new SearchBackpressureStats( - new SearchTaskStats(0, 0, Collections.emptyMap()), - new SearchShardTaskStats(12, 3, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(12))), + new SearchTaskStats(0, 0, 0, Collections.emptyMap()), + new SearchShardTaskStats(12, 3, 20, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(12))), SearchBackpressureMode.ENFORCED ); SearchBackpressureStats actualStats = service.nodeStats(); diff --git a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java index 6478fdfff61d4..f28b82cad30d3 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java @@ -39,6 +39,11 @@ public static SearchShardTaskStats randomInstance() { new ElapsedTimeTracker.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) ); - return new SearchShardTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats); + return new SearchShardTaskStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + resourceUsageTrackerStats + ); } } diff --git a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java index eb33bc1c37b7e..cc7aa92826b41 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java @@ -40,6 +40,6 @@ public static SearchTaskStats randomInstance() { new ElapsedTimeTracker.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) ); - return new SearchTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats); + return new SearchTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats); } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java index c19c1ebcb5c26..ce7344fa0d2d6 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java +++ 
b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java @@ -46,7 +46,7 @@ public void testSerializationRoundtrip() throws IOException { /** * When serializing / deserializing to / from old versions, processor type info is lost. - * + *
<p>
                            * Also, we only supported request/response processors. */ public void testSerializationRoundtripBackcompat() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java index f29ba3b0cea07..db14eb90ef839 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -333,6 +333,58 @@ public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() thr directory.close(); } + public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + final Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(26, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertNotNull(queryBreakDownMap.get(timingTypeKey)); + assertNotNull(queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for current breakdown type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + } + assertEquals(0, 
testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + private Map getLeafBreakdownMap(long startTime, long timeTaken, long count) { Map leafBreakDownMap = new HashMap<>(); for (QueryTimingType timingType : QueryTimingType.values()) { diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java new file mode 100644 index 0000000000000..736bbcdd9e8dd --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile.query; + +import org.opensearch.search.profile.Timer; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.LinkedList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ConcurrentQueryProfilerTests extends OpenSearchTestCase { + + public void testMergeRewriteTimeIntervals() { + ConcurrentQueryProfiler profiler = new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()); + List timers = new LinkedList<>(); + timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L)); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L)); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L)); + LinkedList mergedIntervals = profiler.mergeRewriteTimeIntervals(timers); + assertThat(mergedIntervals.size(), equalTo(2)); + long[] interval = mergedIntervals.get(0); + assertThat(interval[0], equalTo(553074509287335L)); + assertThat(interval[1], equalTo(553074509516290L)); + interval = mergedIntervals.get(1); + assertThat(interval[0], equalTo(553074511206907L)); + assertThat(interval[1], equalTo(553074511424041L)); + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java index 64a440b85eb10..481a224f2ff0e 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java @@ -161,7 +161,9 @@ public void tearDown() throws Exception { } public void testBasic() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1); @@ -228,7 +230,9 @@ public void testBasic() throws IOException { } public void testNoScoring() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed @@ -295,7 +299,9 @@ public void testNoScoring() throws IOException { } public void testUseIndexStats() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.count(query); // will use index stats @@ -309,7 +315,9 @@ public void testUseIndexStats() throws IOException { } public void testApproximations() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index fc77c1b356124..28af8a63cfba8 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -1104,10 +1104,10 @@ public void testDisableTopScoreCollection() throws Exception { assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); if (executor != null) { assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); @@ -1278,7 +1278,7 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(4L)); assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); } @@ -1400,7 +1400,7 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), 
greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); @@ -1435,7 +1435,7 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index 03f0d27188027..c5f36fcc01983 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -43,6 +43,7 @@ import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -65,8 +66,13 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; +import org.mockito.ArgumentCaptor; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class BlobStoreFormatTests extends OpenSearchTestCase { @@ -128,42 +134,36 @@ public void testBlobStoreAsyncOperations() throws IOException, InterruptedExcept BlobPath.cleanPath(), null ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); - + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); CountDownLatch latch = new CountDownLatch(2); - ActionListener actionListener = new ActionListener<>() { - @Override - public void onResponse(Void unused) { - logger.info("---> Async write succeeded"); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - logger.info("---> Failure in async write"); - throw new RuntimeException("async write should not fail"); - } - }; - // Write blobs in different formats checksumSMILE.writeAsync( new BlobObj("checksum smile"), - mockBlobContainer, + spyContainer, "check-smile", CompressorRegistry.none(), - actionListener + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS ); checksumSMILE.writeAsync( new BlobObj("checksum smile compressed"), - mockBlobContainer, + spyContainer, "check-smile-comp", CompressorRegistry.getCompressor(DeflateCompressor.NAME), - actionListener + getVoidActionListener(latch), + 
ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS ); latch.await(); + verify(spyContainer, times(2)).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(2, writeContextArgumentCaptor.getAllValues().size()); + writeContextArgumentCaptor.getAllValues() + .forEach(writeContext -> assertEquals(WritePriority.NORMAL, writeContext.getWritePriority())); // Assert that all checksum blobs can be read assertEquals(checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile", xContentRegistry()).getText(), "checksum smile"); assertEquals( @@ -172,6 +172,39 @@ public void onFailure(Exception e) { ); } + public void testBlobStorePriorityAsyncOperation() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); + ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + CountDownLatch latch = new CountDownLatch(1); + + // Write blobs in different formats + checksumSMILE.writeAsyncWithUrgentPriority( + new BlobObj("cluster state diff"), + spyContainer, + "cluster-state-diff", + CompressorRegistry.none(), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + latch.await(); + + verify(spyContainer).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(WritePriority.URGENT, writeContextArgumentCaptor.getValue().getWritePriority()); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "cluster-state-diff", xContentRegistry()).getText(), + "cluster state diff" + ); + } + public void testBlobStoreOperations() throws IOException { BlobStore blobStore = createTestBlobStore(); BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); @@ -226,6 +259,24 @@ public void testBlobCorruption() throws IOException { } } + private ActionListener getVoidActionListener(CountDownLatch latch) { + ActionListener actionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("---> Async write succeeded"); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + logger.info("---> Failure in async write"); + throw new RuntimeException("async write should not fail"); + } + }; + + return actionListener; + } + protected BlobStore createTestBlobStore() throws IOException { return new FsBlobStore(randomIntBetween(1, 8) * 1024, createTempDir(), false); } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 09f5c1bea1a5e..b7a2baacba611 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -178,6 +178,7 @@ import org.opensearch.gateway.TransportNodesListGatewayStartedShards; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import 
org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.remote.RemoteStorePressureService; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; @@ -195,6 +196,7 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; @@ -221,6 +223,7 @@ import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; @@ -2070,7 +2073,8 @@ public void onFailure(final Exception e) { repositoriesServiceReference::get, fileCacheCleaner, null, - new RemoteStoreStatsTrackerFactory(clusterService, settings) + new RemoteStoreStatsTrackerFactory(clusterService, settings), + DefaultRecoverySettings.INSTANCE ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -2121,7 +2125,8 @@ public void onFailure(final Exception e) { shardStateAction, actionFilters, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ), new GlobalCheckpointSyncAction( @@ -2184,10 +2189,12 @@ public void onFailure(final Exception e) { clusterService, mock(IndicesService.class), mock(ShardStateAction.class), + mock(SegmentReplicationStatsTracker.class), mock(ThreadPool.class) ), mock(RemoteStorePressureService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); actions.put( BulkAction.INSTANCE, @@ -2211,7 +2218,8 @@ public void onFailure(final Exception e) { new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, new SystemIndices(emptyMap())), new IndexingPressureService(settings, clusterService), mock(IndicesService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ); final RestoreService restoreService = new RestoreService( @@ -2300,7 +2308,8 @@ public void onFailure(final Exception e) { List.of(), client ), - null + null, + NoopMetricsRegistry.INSTANCE ) ); actions.put( diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index ca8bec469f3bc..f9388c9e4b86e 100644 --- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -90,7 +90,7 @@ public MockEventuallyConsistentRepository( final Context context, final Random random ) { - super(metadata, false, namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.context = context; this.namedXContentRegistry = namedXContentRegistry; this.random = random; diff --git 
a/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java new file mode 100644 index 0000000000000..80942123fd4fd --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.TracingTelemetry; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; + +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MetricsRegistryFactoryTests extends OpenSearchTestCase { + + private MetricsRegistryFactory metricsRegistryFactory; + + @After + public void close() { + metricsRegistryFactory.close(); + } + + public void testGetMeterRegistryWithUnavailableMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.empty()); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(metricsRegistry.createCounter("test", "test", "test") == NoopCounter.INSTANCE); + assertTrue(metricsRegistry.createUpDownCounter("test", "test", "test") == NoopCounter.INSTANCE); + } + + public void testGetMetricsWithAvailableMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(mock(MetricsTelemetry.class)); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof DefaultMetricsRegistry); + + } + + public void testNullMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(null); + 
metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + + } + + private Set> getClusterSettings() { + Set> allTracerSettings = new HashSet<>(); + ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); + return allTracerSettings; + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java index b27f888eaf502..3a388be22445e 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java @@ -83,6 +83,18 @@ public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() { } + public void testNullTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(null); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof NoopTracer); + + } + private Set> getClusterSettings() { Set> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java index 43e0cb8e44439..8606104d26103 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class WrappedTracerTests extends OpenSearchTestCase { @@ -38,6 +39,7 @@ public void testStartSpanWithTracingDisabledInvokesNoopTracer() throws Exception SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof NoopTracer); + assertFalse(wrappedTracer.isRecording()); verify(mockDefaultTracer, never()).startSpan(SpanCreationContext.internal().name("foo")); } } @@ -46,12 +48,13 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracer() throws Excepti Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); - + when(mockDefaultTracer.isRecording()).thenReturn(true); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); wrappedTracer.startSpan(spanCreationContext); 
assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer); + assertTrue(wrappedTracer.isRecording()); verify(mockDefaultTracer).startSpan(eq(spanCreationContext)); } } diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index 92bd15d818bca..19271bbf30e80 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -32,10 +32,13 @@ package org.opensearch.threadpool; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -43,14 +46,29 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; public class ScalingThreadPoolTests extends OpenSearchThreadPoolTestCase { + @ParametersFactory + public static Collection scalingThreadPools() { + return ThreadPool.THREAD_POOL_TYPES.entrySet() + .stream() + .filter(t -> t.getValue().equals(ThreadPool.ThreadPoolType.SCALING)) + .map(e -> new String[] { e.getKey() }) + .collect(Collectors.toList()); + } + + private final String threadPoolName; + + public ScalingThreadPoolTests(String threadPoolName) { + this.threadPoolName = threadPoolName; + } + public void testScalingThreadPoolConfiguration() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final Settings.Builder builder = Settings.builder(); final int core; @@ -136,11 +154,11 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.TRANSLOG_SYNC, n -> 4 * n); sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessorsMaxFive); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessorsMaxTen); + sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); return sizes.get(threadPoolName).apply(numberOfProcessors); } public void testScalingThreadPoolIsBounded() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int size = randomIntBetween(32, 512); final Settings settings = Settings.builder().put("thread_pool." + threadPoolName + ".max", size).build(); runScalingThreadPoolTest(settings, (clusterSettings, threadPool) -> { @@ -170,7 +188,6 @@ public void testScalingThreadPoolIsBounded() throws InterruptedException { } public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int min = "generic".equals(threadPoolName) ? 4 : 1; final Settings settings = Settings.builder() .put("thread_pool." 
+ threadPoolName + ".max", 128) diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index 9a261c5745bc2..e002297911788 100644 --- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -50,6 +50,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -117,7 +118,8 @@ public void sendMessage(BytesReference reference, ActionListener listener) handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java index 06545b77c6d76..7ab78cca7d615 100644 --- a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java @@ -47,6 +47,7 @@ import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -255,7 +256,8 @@ private void testDefaultSeedAddresses(final Settings settings, Matcher R /** * Executes the given action. - * + *
<p>
                            * This is a shim method to make execution publicly available in tests. */ public static void execute( diff --git a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java index 4ee1314d27fe1..5566b493adc7d 100644 --- a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java @@ -54,7 +54,7 @@ public void resetTerminal() { /** * Runs a command with the given args. - * + *
<p>
                            * Output can be found in {@link #terminal}. */ public String execute(String... args) throws Exception { diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 6354cf18e8b62..2ba4de5e54a67 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -112,6 +112,7 @@ List adjustNodesStats(List nodesStats) { nodeStats.getDiscoveryStats(), nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats(), + nodeStats.getResourceUsageStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), nodeStats.getShardIndexingPressureStats(), @@ -120,7 +121,9 @@ List adjustNodesStats(List nodesStats) { nodeStats.getWeightedRoutingStats(), nodeStats.getFileCacheStats(), nodeStats.getTaskCancellationStats(), - nodeStats.getSearchPipelineStats() + nodeStats.getSearchPipelineStats(), + nodeStats.getSegmentReplicationRejectionStats(), + nodeStats.getRepositoriesStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java index 7f1d1d3381751..0c08de252e4cd 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java @@ -209,28 +209,28 @@ public int allocateAndCheckIndexShardHotSpots(boolean expected, int nodes, Strin continue; } - /** - * Hot spots can occur due to the order in which shards get allocated to nodes. - * A node with fewer shards may not be able to accept current shard due to - * SameShardAllocationDecider, causing it to breach allocation constraint on - * another node. We need to differentiate between such hot spots v/s actual hot - * spots. - * - * A simple check could be to ensure there is no node with shards less than - * allocation limit, that can accept current shard. However, in current - * allocation algorithm, when nodes get throttled, shards are added to - * ModelNodes without adding them to actual cluster (RoutingNodes). As a result, - * the shards per node we see here, are different from the ones observed by - * weight function in balancer. RoutingNodes with {@link count} < {@link limit} - * may not have had the same count in the corresponding ModelNode seen by weight - * function. We hence use the following alternate check -- - * - * Given the way {@link limit} is defined, we should not have hot spots if *all* - * nodes are eligible to accept the shard. A hot spot is acceptable, if either - * all peer nodes have {@link count} > {@link limit}, or if even one node is - * ineligible to accept the shard due to SameShardAllocationDecider, as this - * leads to a chain of events that breach IndexShardsPerNode constraint on all - * other nodes. + /* + Hot spots can occur due to the order in which shards get allocated to nodes. + A node with fewer shards may not be able to accept current shard due to + SameShardAllocationDecider, causing it to breach allocation constraint on + another node. We need to differentiate between such hot spots v/s actual hot + spots. 
+ + A simple check could be to ensure there is no node with shards less than + allocation limit, that can accept current shard. However, in current + allocation algorithm, when nodes get throttled, shards are added to + ModelNodes without adding them to actual cluster (RoutingNodes). As a result, + the shards per node we see here, are different from the ones observed by + weight function in balancer. RoutingNodes with {@link count} < {@link limit} + may not have had the same count in the corresponding ModelNode seen by weight + function. We hence use the following alternate check -- + + Given the way {@link limit} is defined, we should not have hot spots if *all* + nodes are eligible to accept the shard. A hot spot is acceptable, if either + all peer nodes have {@link count} > {@link limit}, or if even one node is + ineligible to accept the shard due to SameShardAllocationDecider, as this + leads to a chain of events that breach IndexShardsPerNode constraint on all + other nodes. */ // If all peer nodes have count >= limit, hotspot is acceptable diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index d24cc24d28579..28d7706fb1493 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -1016,6 +1016,11 @@ public void setLastAcceptedState(ClusterState clusterState) { delegate.setLastAcceptedState(clusterState); } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public void close() { assertTrue(openPersistedStates.remove(this)); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java index 60aacb83e0dc1..946b980bfb62f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java @@ -476,22 +476,22 @@ void unlift() { /** * A cache optimized for small bit-counts (less than 64) and small number of unique permutations of state objects. - * + *
<p>
                            * Each combination of states is kept once only, building on the * assumption that the number of permutations is small compared to the * number of bits permutations. For those histories that are difficult to check * we will have many bits combinations that use the same state permutations. - * + *
<p>
                            * The smallMap optimization allows us to avoid object overheads for bit-sets up to 64 bit large. - * + *
<p>
                            * Comparing set of (bits, state) to smallMap: * (bits, state) : 24 (tuple) + 24 (FixedBitSet) + 24 (bits) + 5 (hash buckets) + 24 (hashmap node). * smallMap bits to {state} : 10 (bits) + 5 (hash buckets) + avg-size of unique permutations. - * + *
<p>
                            * The avg-size of the unique permutations part is very small compared to the * sometimes large number of bits combinations (which are the cases where * we run into trouble). - * + *
<p>
                            * set of (bits, state) totals 101 bytes compared to smallMap bits to { state } * which totals 15 bytes, ie. a 6x improvement in memory usage. */ diff --git a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java index e4774030f21cb..5f4d92d65548b 100644 --- a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java @@ -52,11 +52,11 @@ * Fields available upon process startup: type, timestamp, level, component, * message, node.name, cluster.name. * Whereas node.id and cluster.uuid are available later once the first clusterState has been received. - * + *
<p>
                            * * node.name, cluster.name, node.id, cluster.uuid * should not change across all log lines - * + *
<p>
                            * Note that this won't pass for nodes in clusters that don't have the node name defined in opensearch.yml and start * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName. */ diff --git a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java index d77596cf5cdd1..2f006a5519d69 100644 --- a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java @@ -67,6 +67,12 @@ public class MockGatewayMetaState extends GatewayMetaState { private final BigArrays bigArrays; private final RemoteClusterStateService remoteClusterStateService; private final RemoteStoreRestoreService remoteStoreRestoreService; + private boolean prepareFullState = false; + + public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays, boolean prepareFullState) { + this(localNode, bigArrays); + this.prepareFullState = prepareFullState; + } public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays) { this.localNode = localNode; @@ -99,8 +105,12 @@ Metadata upgradeMetadataForNode( @Override ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { - // Just set localNode here, not to mess with ClusterService and IndicesService mocking - return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + if (prepareFullState) { + return super.prepareInitialClusterState(transportService, clusterService, clusterState); + } else { + // Just set localNode here, not to mess with ClusterService and IndicesService mocking + return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + } } @Override @@ -113,6 +123,16 @@ public void start( NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry, PersistedStateRegistry persistedStateRegistry + ) { + start(settings, nodeEnvironment, xContentRegistry, persistedStateRegistry, false); + } + + public void start( + Settings settings, + NodeEnvironment nodeEnvironment, + NamedXContentRegistry xContentRegistry, + PersistedStateRegistry persistedStateRegistry, + boolean prepareFullState ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(mock(ThreadPool.class)); @@ -126,6 +146,7 @@ public void start( } catch (IOException e) { throw new AssertionError(e); } + this.prepareFullState = prepareFullState; start( settings, transportService, diff --git a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java index 72d34676850b9..0a47db4c740d6 100644 --- a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java +++ b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java @@ -46,7 +46,7 @@ /** * A plugin to use {@link MockEngineFactory}. - * + *
<p>
                            * Subclasses may override the reader wrapper used. */ public class MockEngineFactoryPlugin extends Plugin implements EnginePlugin { diff --git a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java index f016d9450425d..5ab77783b2bac 100644 --- a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java @@ -56,7 +56,7 @@ private RandomCreateIndexGenerator() {} /** * Returns a random {@link CreateIndexRequest}. - * + *
<p>
                            * Randomizes the index name, the aliases, mappings and settings associated with the * index. If present, the mapping definition will be nested under a type name. */ diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 15f9ee546fe6b..43289a7c89524 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -527,7 +527,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, @@ -872,7 +872,13 @@ public EngineConfig config( final Engine.EventListener eventListener ) { final IndexWriterConfig iwc = newIndexWriterConfig(); - final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + final TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE, + "" + ); final List extRefreshListenerList = externalRefreshListener == null ? emptyList() : Collections.singletonList(externalRefreshListener); @@ -939,7 +945,7 @@ protected EngineConfig config( .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build() ); - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, ""); return new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) .indexSettings(indexSettings) diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java index 83bb2f1ee7d65..77137073aa30f 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java @@ -149,7 +149,7 @@ protected Set unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer method) { @@ -158,7 +158,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer method) { diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java index 1987a49431bf9..5dfb2f16a1aae 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java @@ -139,7 +139,7 @@ protected Set unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer method) { @@ -148,7 +148,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer method) { diff --git a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java index b29e25a0bff2c..bcd47e3d578ee 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java @@ -17,6 +17,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.util.List; +import java.util.function.BiConsumer; /** * This class is used by unit tests implementing SegmentReplicationSource @@ -36,6 +37,7 @@ public abstract void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 4505570b8ebe0..412d5235fe462 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -119,6 +119,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryResponse; @@ -173,6 +174,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -640,7 +642,7 @@ protected IndexShard newShard( Collections.emptyList(), clusterSettings ); - Store remoteStore = null; + Store remoteStore; RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); @@ -659,6 +661,8 @@ protected IndexShard newShard( remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, indexSettings.getSettings()); BlobStoreRepository repo = createRepository(remotePath); when(mockRepoSvc.repository(any())).thenAnswer(invocationOnMock -> repo); + } else { + remoteStore = null; } final BiFunction translogFactorySupplier = (settings, shardRouting) -> { @@ -697,7 +701,9 @@ protected IndexShard newShard( checkpointPublisher, remoteStore, remoteStoreStatsTrackerFactory, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + "dummy-node", + DefaultRecoverySettings.INSTANCE ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -1081,7 +1087,7 @@ protected void recoverReplica( /** * Recovers a replica from the give primary, allow the user to supply a custom recovery target. A typical usage of a custom recovery * target is to assert things in the various stages of recovery. - * + *
<p>
                            * Note: this method keeps the shard in {@link IndexShardState#POST_RECOVERY} and doesn't start it. * * @param replica the recovery target shard @@ -1615,6 +1621,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { try ( diff --git a/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java new file mode 100644 index 0000000000000..359668f5dad71 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.recovery; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RecoverySettings} instance containing all defaults + */ +public final class DefaultRecoverySettings { + private DefaultRecoverySettings() {} + + public static final RecoverySettings INSTANCE = new RecoverySettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java index ef13e065968df..0f5e043ee1135 100644 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java @@ -43,7 +43,7 @@ public class PriviledgedMockMaker implements MockMaker { * since Mockito does not support SecurityManager out of the box. The method has to be called by * test framework before the SecurityManager is being set, otherwise additional permissions have * to be granted to the caller: - * + *
<p>
                            * permission java.security.Permission "createAccessControlContext" * */ diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index bf020b1bc292d..faa9d52b105b2 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -238,7 +238,7 @@ public void testRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } - private Map getMockRequestCounts() { + protected Map getMockRequestCounts() { for (HttpHandler h : handlers.values()) { while (h instanceof DelegatingHttpHandler) { if (h instanceof HttpStatsCollectorHandler) { @@ -265,7 +265,7 @@ protected static void drainInputStream(final InputStream inputStream) throws IOE /** * HTTP handler that injects random service errors - * + *
<p>
                            * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -339,7 +339,7 @@ public interface DelegatingHttpHandler extends HttpHandler { /** * HTTP handler that allows collect request stats per request type. - * + *
<p>
                            * Implementors should keep track of the desired requests on {@link #maybeTrack(String, Headers)}. */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") @@ -377,7 +377,7 @@ public void handle(HttpExchange exchange) throws IOException { /** * Tracks the given request if it matches the criteria. - * + *
<p>
                            * The request is represented as: * Request = Method SP Request-URI * diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index cb0614ddeb808..83b245a1bcecb 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -58,14 +58,14 @@ /** * A mocked script engine that can be used for testing purpose. - * + *
<p>
                            * This script engine allows to define a set of predefined scripts that basically a combination of a key and a * function: - * + *
<p>
                            * The key can be anything as long as it is a {@link String} and is used to resolve the scripts * at compilation time. For inline scripts, the key can be a description of the script. For stored and file scripts, * the source must match a key in the predefined set of scripts. - * + *
<p>
                            * The function is used to provide the result of the script execution and can return anything. */ public class MockScriptEngine implements ScriptEngine { diff --git a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java index 26b439fa6438f..9cf2141555957 100644 --- a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java +++ b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java @@ -39,7 +39,7 @@ /** * A float encapsulation that dynamically accesses the score of a document. - * + *
<p>
                            * The provided {@link DocLookup} is used to retrieve the score * for the current document. */ diff --git a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java index b942136e1f1e2..74de1e6d96d93 100644 --- a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java @@ -131,6 +131,9 @@ public static SearchRequest randomSearchRequest(Supplier ra if (randomBoolean()) { searchRequest.setCancelAfterTimeInterval(TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval")); } + if (randomBoolean()) { + searchRequest.setPhaseTook(randomBoolean()); + } return searchRequest; } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 5c649f1dc832d..82f15a590bea6 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -685,7 +685,7 @@ protected static IndexReader maybeWrapReaderEs(DirectoryReader reader) throws IO * Implementors should return a list of {@link ValuesSourceType} that the aggregator supports. * This is used to test the matrix of supported/unsupported field types against the aggregator * and verify it works (or doesn't) as expected. - * + *
<p>
                            * If this method is implemented, {@link AggregatorTestCase#createAggBuilderForTypeTest(MappedFieldType, String)} * should be implemented as well. * @@ -702,7 +702,7 @@ protected List getSupportedValuesSourceTypes() { * The field type and name are provided, and the implementor is expected to return an AggBuilder accordingly. * The AggBuilder should be returned even if the aggregation does not support the field type, because * the test will check if an exception is thrown in that case. - * + *
<p>
                            * The list of supported types are provided by {@link AggregatorTestCase#getSupportedValuesSourceTypes()}, * which must also be implemented. * @@ -720,7 +720,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy * A method that allows implementors to specifically denylist particular field types (based on their content_name). * This is needed in some areas where the ValuesSourceType is not granular enough, for example integer values * vs floating points, or `keyword` bytes vs `binary` bytes (which are not searchable) - * + *
<p>
                            * This is a denylist instead of an allowlist because there are vastly more field types than ValuesSourceTypes, * and it's expected that these unsupported cases are exceptional rather than common */ @@ -734,7 +734,7 @@ protected List unsupportedMappedFieldTypes() { * is provided by the implementor class, and it is executed against each field type in turn. If * an exception is thrown when the field is supported, that will fail the test. Similarly, if * an exception _is not_ thrown when a field is unsupported, that will also fail the test. - * + *
<p>
                            * Exception types/messages are not currently checked, just presence/absence of an exception. */ public void testSupportedFieldTypes() throws IOException { @@ -825,7 +825,7 @@ private ValuesSourceType fieldToVST(MappedFieldType fieldType) { /** * Helper method to write a single document with a single value specific to the requested fieldType. - * + *
<p>
                            * Throws an exception if it encounters an unknown field type, to prevent new ones from sneaking in without * being tested. */ diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java index 1b348dc7d41a7..6b5ec838f401d 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -305,6 +305,6 @@ protected static Map createAfterKey(Object... fields) { } protected static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 1bb1e44a8a600..0ee889af5ce1a 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -331,6 +331,12 @@ public static void blockNodeOnAnyFiles(String repository, String nodeName) { ); } + public static void blockNodeOnAnySegmentFile(String repository, String nodeName) { + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnSegmentFiles( + true + ); + } + public static void blockDataNode(String repository, String nodeName) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnDataFiles(true); } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java index 7db71c4be0968..72c4ba44d0a31 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java @@ -139,6 +139,8 @@ public long getFailureCount() { private volatile boolean blockOnDataFiles; + private volatile boolean blockOnSegmentFiles; + private volatile boolean blockOnDeleteIndexN; /** @@ -190,6 +192,7 @@ public MockRepository( maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnAnyFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); + blockOnSegmentFiles = metadata.settings().getAsBoolean("block_on_segment", false); blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); @@ -237,6 +240,7 @@ public synchronized void unblock() { blocked = false; // Clean blocking flags, so we wouldn't try to block again blockOnDataFiles = false; + blockOnSegmentFiles = false; blockOnAnyFiles = false; blockAndFailOnWriteIndexFile = false; blockOnWriteIndexFile = false; @@ -259,6 +263,14 @@ public void setBlockOnAnyFiles(boolean blocked) { 
blockOnAnyFiles = blocked; } + public void blockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + + public void setBlockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + public void setBlockAndFailOnWriteSnapFiles(boolean blocked) { blockAndFailOnWriteSnapFile = blocked; } @@ -306,6 +318,7 @@ private synchronized boolean blockExecution() { boolean wasBlocked = false; try { while (blockOnDataFiles + || blockOnSegmentFiles || blockOnAnyFiles || blockAndFailOnWriteIndexFile || blockOnWriteIndexFile @@ -407,6 +420,8 @@ private void maybeIOExceptionOrBlock(String blobName) throws IOException { blockExecutionAndMaybeWait(blobName); } else if (blobName.startsWith("snap-") && blockAndFailOnWriteSnapFile) { blockExecutionAndFail(blobName); + } else if (blockOnSegmentFiles && blobName.contains(".si__")) { + blockExecutionAndMaybeWait(blobName); } } } diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java index f5d358a162bd1..10e782e6af8da 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java @@ -41,8 +41,8 @@ /** * An abstract test case to ensure correct behavior of Diffable. - * - * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support, + *
<p>
                            + * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization, XContent-based serialization and is diffable. * Writable serialization, XContent-based serialization and is diffable. */ public abstract class AbstractDiffableSerializationTestCase & ToXContent> extends AbstractSerializingTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java index ff7c39cd8f102..3f97f0704d733 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java @@ -40,8 +40,8 @@ /** * An abstract test case to ensure correct behavior of Diffable. - * - * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support, + *
<p>
                            + * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization and is diffable. * Writable serialization and is diffable. */ public abstract class AbstractDiffableWireSerializationTestCase> extends AbstractWireSerializingTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java index 47fe85d28975f..afd93e1b72fbb 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java @@ -213,7 +213,7 @@ public void testUnknownObjectException() throws IOException { /** * Traverses the json tree of the valid query provided as argument and mutates it one or more times by adding one object within each * object encountered. - * + *
<p>
                            * For instance given the following valid term query: * { * "term" : { @@ -222,7 +222,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
                            * The following two mutations will be generated, and an exception is expected when trying to parse them: * { * "term" : { @@ -233,7 +233,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
                            * { * "term" : { * "field" : { @@ -243,7 +243,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
                            * Every mutation is then added to the list of results with a boolean flag indicating if a parsing exception is expected or not * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the * arbitraryMarkers parameter. @@ -768,7 +768,7 @@ protected static String randomMinimumShouldMatch() { /** * Call this method to check a valid json string representing the query under test against * it's generated json. - * + *
<p>
                            * Note: By the time of this writing (Nov 2015) all queries are taken from the query dsl * reference docs mirroring examples there. Here's how the queries were generated: * diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java index 9a4363dc4d946..64c5d916d55d2 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java @@ -124,9 +124,9 @@ protected final T copyInstance(T instance) throws IOException { /** * Get the {@link NamedWriteableRegistry} to use when de-serializing the object. - * + *
<p>
                            * Override this method if you need to register {@link NamedWriteable}s for the test object to de-serialize. - * + *
<p>
                            * By default this will return a {@link NamedWriteableRegistry} with no registered {@link NamedWriteable}s */ protected NamedWriteableRegistry getNamedWriteableRegistry() { diff --git a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java index 0dce5e78bf91f..67522bb618cf1 100644 --- a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java @@ -121,7 +121,7 @@ public static void corruptFile(Random random, Path... files) throws IOException } } - static void corruptAt(Path path, FileChannel channel, int position) throws IOException { + public static void corruptAt(Path path, FileChannel channel, int position) throws IOException { // read channel.position(position); long filePointer = channel.position(); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index d3e24ccd90500..952cd6c085966 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -1816,7 +1816,7 @@ public synchronized void stopCurrentClusterManagerNode() throws IOException { /** * Stops any of the current nodes but not the cluster-manager node. */ - public synchronized void stopRandomNonClusterManagerNode() throws IOException { + public synchronized void stopRandomNodeNotCurrentClusterManager() throws IOException { NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName()).negate()); if (nodeAndClient != null) { logger.info( @@ -1841,11 +1841,11 @@ public synchronized void stopCurrentMasterNode() throws IOException { /** * Stops any of the current nodes but not the cluster-manager node. * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNonClusterManagerNode()} + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNodeNotCurrentClusterManager()} */ @Deprecated - public synchronized void stopRandomNonMasterNode() throws IOException { - stopRandomNonClusterManagerNode(); + public synchronized void stopRandomNodeNotCurrentMaster() throws IOException { + stopRandomNodeNotCurrentClusterManager(); } /** @@ -1853,14 +1853,16 @@ public synchronized void stopRandomNonMasterNode() throws IOException { */ public void stopAllNodes() { try { - int totalDataNodes = numDataNodes(); - while (totalDataNodes > 0) { - stopRandomDataNode(); - totalDataNodes -= 1; + if (numDataAndClusterManagerNodes() != numClusterManagerNodes()) { + int totalDataNodes = numDataNodes(); + while (totalDataNodes > 0) { + stopRandomDataNode(); + totalDataNodes -= 1; + } } int totalClusterManagerNodes = numClusterManagerNodes(); while (totalClusterManagerNodes > 1) { - stopRandomNonClusterManagerNode(); + stopRandomNodeNotCurrentClusterManager(); totalClusterManagerNodes -= 1; } stopCurrentClusterManagerNode(); @@ -1869,6 +1871,18 @@ public void stopAllNodes() { } } + /** + * Replace all nodes by stopping all current node and starting new node. + * Used for remote store test cases, where remote state is restored. 
+ */ + public void resetCluster() { + int totalClusterManagerNodes = numClusterManagerNodes(); + int totalDataNodes = numDataNodes(); + stopAllNodes(); + startClusterManagerOnlyNodes(totalClusterManagerNodes); + startDataOnlyNodes(totalDataNodes); + } + private synchronized void startAndPublishNodesAndClients(List nodeAndClients) { if (nodeAndClients.size() > 0) { final int newClusterManagers = (int) nodeAndClients.stream() @@ -2718,6 +2732,9 @@ public void ensureEstimatedStats() { false, false, false, + false, + false, + false, false ); assertThat( diff --git a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java index c27f3f169fbae..3d829d77dd323 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java +++ b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java @@ -44,7 +44,7 @@ /** * Some tests rely on the keyword tokenizer, but this tokenizer isn't part of lucene-core and therefor not available * in some modules. What this test plugin does, is use the mock tokenizer and advertise that as the keyword tokenizer. - * + *
<p>
                            * Most tests that need this test plugin use normalizers. When normalizers are constructed they try to resolve the * keyword tokenizer, but if the keyword tokenizer isn't available then constructing normalizers will fail. */ diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 6e064f943ca07..ad27d9834f159 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -130,9 +130,9 @@ import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -500,7 +500,7 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put( - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), (random.nextBoolean() ? random.nextDouble() : random.nextBoolean()).toString() ); } @@ -789,6 +789,30 @@ protected Settings featureFlagSettings() { return featureSettings.build(); } + /** + * Represent if it needs to trigger remote state restore or not. + * For tests with remote store enabled domain, it will be overridden to true. + * + * @return if needs to perform remote state restore or not + */ + protected boolean triggerRemoteStateRestore() { + return false; + } + + /** + * For tests with remote cluster state, it will reset the cluster and cluster state will be + * restored from remote. + */ + protected void performRemoteStoreTestAction() { + if (triggerRemoteStateRestore()) { + String clusterUUIDBefore = clusterService().state().metadata().clusterUUID(); + internalCluster().resetCluster(); + String clusterUUIDAfter = clusterService().state().metadata().clusterUUID(); + // assertion that UUID is changed post restore. + assertFalse(clusterUUIDBefore.equals(clusterUUIDAfter)); + } + } + /** * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices * already exists this method will fail and wipe all the indices created so far. @@ -1343,7 +1367,7 @@ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean l /** * Ensures that all nodes in the cluster are connected to each other. - * + *
<p>
                            * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster @@ -1928,6 +1952,7 @@ protected Settings nodeSettings(int nodeOrdinal) { // Enable tracer only when Telemetry Setting is enabled if (featureFlagSettings().getAsBoolean(FeatureFlags.TELEMETRY_SETTING.getKey(), false)) { + builder.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { @@ -2311,11 +2336,11 @@ public static void afterClass() throws Exception { private static void initializeSuiteScope() throws Exception { Class targetClass = getTestClass(); - /** - * Note we create these test class instance via reflection - * since JUnit creates a new instance per test and that is also - * the reason why INSTANCE is static since this entire method - * must be executed in a static context. + /* + Note we create these test class instance via reflection + since JUnit creates a new instance per test and that is also + the reason why INSTANCE is static since this entire method + must be executed in a static context. */ assert INSTANCE == null; if (isSuiteScopedTest(targetClass)) { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index f14fe3bf3961c..efc29d1c254e6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -254,6 +254,7 @@ private Node newNode() { .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) + .put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true) .put(nodeSettings()) // allow test cases to provide their own settings or override these .put(featureFlagSettings); if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { @@ -271,6 +272,7 @@ private Node newNode() { plugins.add(MockHttpTransport.TestPlugin.class); } plugins.add(MockScriptService.TestPlugin.class); + plugins.add(MockTelemetryPlugin.class); Node node = new MockNode(settingsBuilder.build(), plugins, forbidPrivateIndexSettings()); try { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java index e853c1e6314e1..9f7f33b351be3 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java @@ -48,9 +48,9 @@ @TimeoutSuite(millis = TimeUnits.HOUR) @LuceneTestCase.SuppressReproduceLine @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -/** - * Basic test case for token streams. the assertion methods in this class will - * run basic checks to enforce correct behavior of the token streams. +/* + Basic test case for token streams. the assertion methods in this class will + run basic checks to enforce correct behavior of the token streams. 
*/ public abstract class OpenSearchTokenStreamTestCase extends BaseTokenStreamTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index 61742cd4fb827..8c41e6e5d5b38 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -42,10 +42,12 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.Closeable; @@ -253,7 +255,18 @@ public void wipeRepositories(String... repositories) { } for (String repository : repositories) { try { - client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + List repositoryMetadata = client().admin() + .cluster() + .prepareGetRepositories(repository) + .execute() + .actionGet() + .repositories(); + if (repositoryMetadata.isEmpty() == false + && BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.get(repositoryMetadata.get(0).settings()) == true) { + client().admin().cluster().prepareCleanupRepository(repository).execute().actionGet(); + } else { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } } catch (RepositoryMissingException ex) { // ignore } @@ -263,7 +276,7 @@ public void wipeRepositories(String... repositories) { /** * Ensures that any breaker statistics are reset to 0. - * + *
<p>
                            * The implementation is specific to the test cluster, because the act of * checking some breaker stats can increase them. */ diff --git a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java index 0cb39a29e8159..2c535211ca456 100644 --- a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java @@ -164,10 +164,10 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * This method takes the input xContent data and adds a random field value, inner object or array into each * json object. This can e.g. be used to test if parsers that handle the resulting xContent can handle the * augmented xContent correctly, for example when testing lenient parsing. - * + *
<p>
                            * If the xContent output contains objects that should be skipped of such treatment, an optional filtering * {@link Predicate} can be supplied that checks xContent paths that should be excluded from this treatment. - * + *
<p>
                            * This predicate should check the xContent path that we want to insert to and return {@code true} if the * path should be excluded. Paths are string concatenating field names and array indices, so e.g. in: * @@ -188,7 +188,7 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * * * "foo1.bar.2.baz" would point to the desired insert location. - * + *
<p>
                            * To exclude inserting into the "foo1" object we would user a {@link Predicate} like *

                                  * {@code
                            @@ -257,12 +257,12 @@ public static BytesReference insertRandomFields(
                                  * This utility method takes an XContentParser and walks the xContent structure to find all
                                  * possible paths to where a new object or array starts. This can be used in tests that add random
                                  * xContent values to test parsing code for errors or to check their robustness against new fields.
                            -     *
                            +     * 
<p>
                            * The path uses dot separated fieldnames and numbers for array indices, similar to what we do in * {@link ObjectPath}. - * + *
<p>
                            * The {@link Stack} passed in should initially be empty, it gets pushed to by recursive calls - * + *
<p>
                            * As an example, the following json xContent: *

                                  *     {
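The insertRandomFields javadoc above describes a Predicate that receives dot-separated xContent paths and returns true for paths that must be excluded from random augmentation. Below is a minimal sketch of a caller, assuming the usual (content type, source, exclude filter, random) parameter order and the package locations noted in the comments; both are assumptions rather than content of this diff.

import java.io.IOException;
import java.util.Random;
import java.util.function.Predicate;

import org.opensearch.common.xcontent.XContentType;       // package location assumed
import org.opensearch.core.common.bytes.BytesReference;   // package location assumed
import org.opensearch.test.XContentTestUtils;

public class InsertRandomFieldsSketch {
    /**
     * Augments the given document with random fields everywhere except under "foo1",
     * mirroring the exclusion example in the javadoc above.
     */
    static BytesReference augmentButKeepFoo1(BytesReference original, Random random) throws IOException {
        // Return true for any path under "foo1" so that object is never touched.
        Predicate<String> excludeFilter = path -> path.startsWith("foo1");
        return XContentTestUtils.insertRandomFields(XContentType.JSON, original, excludeFilter, random);
    }
}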
                            diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
                            index a0e87d5fd7189..45d779b3e8697 100644
                            --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
                            +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
                            @@ -47,7 +47,7 @@
                             /**
                              * Client that always responds with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}
                              * for testing.
                            - *
                            + * 
<p>
                            * See also {@link NoOpNodeClient} if you need to mock a {@link org.opensearch.client.node.NodeClient}. */ public class NoOpClient extends AbstractClient { diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java index 7dfe9298e9a92..4e84fe3b91d15 100644 --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java @@ -55,7 +55,7 @@ * Client that always response with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}, * {@link #executeLocally(ActionType, ActionRequest, ActionListener)}, or {@link #executeLocally(ActionType, ActionRequest, TaskListener)} * for testing. - * + *
<p>
                            * See also {@link NoOpClient} if you do not specifically need a {@link NodeClient}. */ public class NoOpNodeClient extends NodeClient { diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java index 690e15dd80873..44837c37962b4 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java @@ -248,7 +248,7 @@ public TimeValue expectedTimeToHeal() { /** * resolves all threads belonging to given node and suspends them if their current stack trace * is "safe". Threads are added to nodeThreads if suspended. - * + *
<p>
                            * returns true if some live threads were found. The caller is expected to call this method * until no more "live" are found. */ diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java index e77b8f5b24897..62e19750a363b 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java @@ -109,7 +109,7 @@ public void ensureHealthy(InternalTestCluster cluster) { /** * Ensures that all nodes in the cluster are connected to each other. - * + *
<p>
                            * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index 107e42ce43487..7462062a0cd46 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -57,13 +57,13 @@ * A gateway allocator implementation that keeps an in memory list of started shard allocation * that are used as replies to the, normally async, fetch data requests. The in memory list * is adapted when shards are started and failed. - * + *
<p>
                            * Nodes leaving and joining the cluster do not change the list of shards the class tracks but * rather serves as a filter to what is returned by fetch data. Concretely - fetch data will * only return shards that were started on nodes that are currently part of the cluster. - * + *
<p>
                            * For now only primary shard related data is fetched. Replica request always get an empty response. - * + *
<p>
                            * * This class is useful to use in unit tests that require the functionality of {@link GatewayAllocator} but do * not have all the infrastructure required to use it. diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java index f81c47b6a0a08..97fdcb796f195 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level when investigating test failures. Do not use this annotation to explicitly * control the logging level in tests; instead, use {@link TestLogging}. - * + *
<p>
                            * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java index d440fae409897..6c0b87ac67354 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level for controlling logging behavior in tests. Do not use this annotation when * investigating test failures; instead, use {@link TestIssueLogging}. - * + *
<p>
                            * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java index 7c94c16b77471..ea2c5d055ed8b 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java @@ -53,7 +53,7 @@ * A {@link RunListener} that allows changing the log level for a specific test method. When a test method is annotated with the * {@link TestLogging} annotation, the level for the specified loggers will be internally saved before the test method execution and * overridden with the specified ones. At the end of the test method execution the original loggers levels will be restored. - * + *
<p>
                            * This class is not thread-safe. Given the static nature of the logging API, it assumes that tests are never run concurrently in the same * JVM. For the very same reason no synchronization has been implemented regarding the save/restore process of the original loggers * levels. diff --git a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java index dc13924195254..a77865579f3b3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java @@ -103,7 +103,7 @@ protected void dispatchRequest(RestRequest request) { /** * A mocked {@link NodeClient} which can be easily reconfigured to verify arbitrary verification * functions, and can be reset to allow reconfiguration partway through a test without having to construct a new object. - * + *
<p>
                            * By default, will throw {@link AssertionError} when any execution method is called, unless configured otherwise using * {@link #setExecuteVerifier(BiFunction)} or {@link #setExecuteLocallyVerifier(BiFunction)}. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java index eeaa76b6ca1b3..3a80f25c5d4ab 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java @@ -35,7 +35,7 @@ /** * Matches denylist patterns. - * + *
<p>
                            * Currently the following syntax is supported: * *

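DenylistedPathPatternMatcher entries follow the api/suite/test-section path layout used by the YAML test runner, and a '*' wildcard is conventionally confined to a single path segment. The standalone sketch below only mimics that behaviour for illustration; the regex translation it uses is an assumption and is not the matcher's actual implementation.

public class DenylistPatternSketch {
    public static void main(String[] args) {
        // A denylist entry such as "indices.get/10_basic/*" targets every test
        // section of one YAML suite; '*' is assumed to stop at segment boundaries.
        String pattern = "indices.get/10_basic/*";
        String regex = pattern.replace(".", "\\.").replace("*", "[^/]*");
        System.out.println("indices.get/10_basic/Get simple".matches(regex)); // true
        System.out.println("indices.get/20_other/Get simple".matches(regex)); // false
    }
}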
                              diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java index 10fb1e52259a9..8e0bc03b08442 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java @@ -41,7 +41,7 @@ * Allows to register additional features supported by the tests runner. * This way any runner can add extra features and use proper skip sections to avoid * breaking others runners till they have implemented the new feature as well. - * + *
<p>
                              * Once all runners have implemented the feature, it can be removed from the list * and the related skip sections can be removed from the tests as well. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index c3289e7651056..12599f003077d 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -78,7 +78,7 @@ /** * Runs a suite of yaml tests shared with all the official OpenSearch * clients against an opensearch cluster. - * + *
<p>
                              * The suite timeout is extended to account for projects with a large number of tests. */ @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) @@ -110,9 +110,9 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe /** * This separator pattern matches ',' except it is preceded by a '\'. * This allows us to support ',' within paths when it is escaped with a slash. - * + *
<p>
                              * For example, the path string "/a/b/c\,d/e/f,/foo/bar,/baz" is separated to "/a/b/c\,d/e/f", "/foo/bar" and "/baz". - * + *
<p>
                              * For reference, this regular expression feature is known as zero-width negative look-behind. * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index ef8bf37d88227..ae1c33b1eb47f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -81,7 +81,7 @@ /** * Represents a do section: - * + *
<p>
                              * - do: * catch: missing * headers: diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 2132c2ebab51c..8e929eff44348 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a gte assert section: - * + *
<p>
                              * - gte: { fields._ttl: 0 } */ public class GreaterThanEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java index 6cfbbfc5df8d5..999486ad04455 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java @@ -45,7 +45,7 @@ /** * Represents an is_false assert section: - * + *
<p>
                              * - is_false: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java index e746542d89126..bf5822406f014 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java @@ -46,7 +46,7 @@ /** * Represents an is_true assert section: - * + *
<p>
                              * - is_true: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java index 263e0c8fb9c42..d6e2ae1e23996 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java @@ -46,7 +46,7 @@ /** * Represents a lt assert section: - * + *
<p>
                              * - lt: { fields._ttl: 20000} * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index f0e7d0c01b8f0..ee46c04496f32 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a lte assert section: - * + *
<p>
                              * - lte: { fields._ttl: 0 } */ public class LessThanOrEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java index a97e3e4cb77ed..77d8f3154729e 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java @@ -50,7 +50,7 @@ /** * Represents a match assert section: - * + *
<p>
                              * - match: { get.fields._routing: "5" } * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java index a561a53119a96..c8004d9807cb9 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java @@ -42,7 +42,7 @@ /** * Represents a set section: - * + *
<p>
                              * - set: {_scroll_id: scroll_id} * */ diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java index 6b428a7f65594..dda413ce2818e 100644 --- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java +++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java @@ -10,7 +10,9 @@ import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.Counter; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.noop.NoopCounter; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; @@ -34,6 +36,20 @@ public TracingTelemetry getTracingTelemetry() { @Override public MetricsTelemetry getMetricsTelemetry() { return new MetricsTelemetry() { + @Override + public Counter createCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public void close() { + + } }; } } diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java index c1795e61096ac..6bf5381b62cc9 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java @@ -58,6 +58,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.tasks.TaskManager; import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; @@ -140,7 +141,8 @@ public static MockNioTransport newMockTransport(Settings settings, Version versi new NetworkService(Collections.emptyList()), new MockPageCacheRecycler(settings), namedWriteableRegistry, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); } diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java index 5795f860efa7e..cd6bf02efef6f 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java @@ -65,6 +65,7 @@ import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.Page; import org.opensearch.nio.ServerChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.InboundPipeline; @@ -110,9 +111,10 @@ public MockNioTransport( NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService + CircuitBreakerService circuitBreakerService, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, 
pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool, settings); } diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java index a596bdd5e419f..deb489614be26 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java @@ -39,6 +39,7 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -57,7 +58,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( MOCK_NIO_TRANSPORT_NAME, @@ -68,7 +70,8 @@ public Map> getTransports( networkService, pageCacheRecycler, namedWriteableRegistry, - circuitBreakerService + circuitBreakerService, + tracer ) ); } diff --git a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java index fb77161a02aef..ce401ad99fad7 100644 --- a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.transport.AbstractSimpleTransportTestCase; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; @@ -71,7 +72,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti networkService, new MockPageCacheRecycler(settings), namedWriteableRegistry, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java index 4b661dc0ad0fe..c5d179f6412a8 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java @@ -166,7 +166,9 @@ public Long getEndTime() { } public void setError(Exception exception) { - putMetadata("ERROR", exception.getMessage()); + if (exception != null) { + putMetadata("ERROR", exception.getMessage()); + } } private static class IdGenerator {