diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 40c8fb2e01dd7..b5b0a815b02b2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,5 +1,5 @@ # CODEOWNERS manages notifications, not PR approvals -# For PR approvals see /.github/workflows/maintainer-approval.yml +# For PR approvals see /.github/workflows/maintainer-approval.yml # Files have a single rule applied, the last match decides the owner # If you would like to more specifically apply ownership, include existing owner in new sub fields @@ -24,4 +24,4 @@ /.github/ @peternied -/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah +/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 6281fa0af3e36..5476637b84e92 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -13,15 +13,9 @@ Resolves #[Issue number to be closed when this PR is merged] ### Check List -- [ ] New functionality includes testing. - - [ ] All tests pass -- [ ] New functionality has been documented. - - [ ] New functionality has javadoc added -- [ ] API changes companion pull request [created](https://github.com/opensearch-project/opensearch-api-specification/blob/main/DEVELOPER_GUIDE.md). -- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) -- [ ] Commits are signed per the DCO using --signoff -- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) -- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) +- [ ] Functionality includes testing. +- [ ] API changes companion pull request [created](https://github.com/opensearch-project/opensearch-api-specification/blob/main/DEVELOPER_GUIDE.md), if applicable. +- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose), if applicable. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
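As an aside on the "last match decides the owner" rule stated in the CODEOWNERS header above, a minimal sketch with hypothetical paths and teams (none of these are from this repository): given the two rules below, a change to docs/build.gradle notifies only @doc-team, because both patterns match the file and the later rule wins.
# Hypothetical CODEOWNERS fragment: the last matching rule takes precedence
*.gradle    @build-team
docs/       @doc-team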
diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index d18170e9ea6b7..51ae075ffa2c9 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -17,8 +17,23 @@ jobs: java-version: ${{ matrix.java }} distribution: temurin - name: Setup docker (missing on MacOS) + id: setup_docker if: runner.os == 'macos' uses: douglascamata/setup-docker-macos-action@main + continue-on-error: true + with: + upgrade-qemu: true + colima: v0.6.8 + - name: Run Gradle (assemble) + if: runner.os == 'macos' && steps.setup_docker.outcome != 'success' + run: | + # Report success even if previous step failed (Docker on MacOS runner is very unstable) + exit 0; + - name: Run Gradle (assemble) + if: runner.os != 'macos' + run: | + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE - name: Run Gradle (assemble) + if: runner.os == 'macos' && steps.setup_docker.outcome == 'success' run: | ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml new file mode 100644 index 0000000000000..9580d510fd108 --- /dev/null +++ b/.github/workflows/dco.yml @@ -0,0 +1,19 @@ +name: Developer Certificate of Origin Check + +on: [pull_request] + +jobs: + dco-check: + runs-on: ubuntu-latest + + steps: + - name: Get PR Commits + id: 'get-pr-commits' + uses: tim-actions/get-pr-commits@v1.1.0 + with: + token: ${{ secrets.GITHUB_TOKEN }} + - name: DCO Check + uses: tim-actions/dco@v1.1.0 + with: + commits: ${{ steps.get-pr-commits.outputs.commits }} + diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 1f5c187c28e7d..07185ef4c65e3 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -12,13 +12,28 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: + check-files: + runs-on: ubuntu-latest + outputs: + RUN_GRADLE_CHECK: ${{ steps.changed-files-specific.outputs.any_changed }} + steps: + - uses: actions/checkout@v4 + - name: Get changed files + id: changed-files-specific + uses: tj-actions/changed-files@v44 + with: + files_ignore: | + release-notes/*.md + .github/** + *.md + gradle-check: - if: github.repository == 'opensearch-project/OpenSearch' + needs: check-files + if: github.repository == 'opensearch-project/OpenSearch' && needs.check-files.outputs.RUN_GRADLE_CHECK == 'true' permissions: contents: read # to fetch code (actions/checkout) pull-requests: write # to create or update comment (peter-evans/create-or-update-comment) issues: write # To create an issue if check fails on push. 
- runs-on: ubuntu-latest timeout-minutes: 130 steps: @@ -30,11 +45,31 @@ jobs: - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | + echo "event_name=pull_request_target" >> $GITHUB_ENV + echo "branch_name=$(jq --raw-output .pull_request.base.ref $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --ascii-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "post_merge_action=false" >> $GITHUB_ENV + + # to get the PR data that can be used for post merge actions + - uses: actions/github-script@v7 + if: github.event_name == 'push' + id: get_pr_data + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + return ( + await github.rest.repos.listPullRequestsAssociatedWithCommit({ + commit_sha: context.sha, + owner: context.repo.owner, + repo: context.repo.repo, + }) + ).data[0]; - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -43,11 +78,15 @@ jobs: ref_id=$(git rev-parse HEAD) branch_name=$(git rev-parse --abbrev-ref HEAD) echo "branch_name=$branch_name" >> $GITHUB_ENV + echo "event_name=push" >> $GITHUB_ENV echo "pr_from_sha=$ref_id" >> $GITHUB_ENV echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV - echo "pr_number=Null" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo 'pr_number=${{ fromJson(steps.get_pr_data.outputs.result).number }}' >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --ascii-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "post_merge_action=true" >> $GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 @@ -127,3 +166,12 @@ jobs: with: assignees: ${{ github.event.head_commit.author.username }}, ${{ github.triggering_actor }} filename: .github/ISSUE_TEMPLATE/failed_check.md + + check-result: + needs: [check-files, gradle-check] + if: always() + runs-on: ubuntu-latest + steps: + - name: Fail if gradle-check fails + if: ${{ needs.check-files.outputs.RUN_GRADLE_CHECK && needs.gradle-check.result == 'failure' }} + run: exit 1 \ No newline at end of file diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml deleted file mode 100644 index eec363572478c..0000000000000 --- a/.github/workflows/pull-request-checks.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Pull Request Checks - -on: - pull_request: - types: - [ - opened, - edited, - review_requested, - synchronize, - reopened, - ready_for_review, - ] - -jobs: - verify-description-checklist: - name: Verify Description Checklist - runs-on: ubuntu-latest - steps: - - uses: peternied/check-pull-request-description-checklist@v1.1 - if: github.event.pull_request.user.login != 'dependabot[bot]' - with: - checklist-items: | - New functionality includes testing. 
- All tests pass - New functionality has been documented. - New functionality has javadoc added - Commits are signed per the DCO using --signoff - Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 964383078c38d..1cc12f66d52e1 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies ### Changed +- Changed locale provider from COMPAT to CLDR ([13988](https://github.com/opensearch-project/OpenSearch/pull/13988)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33de6a5668c0e..1ffb438172f50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,23 +5,40 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333)) - Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423)) - Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478)) - Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293)) -- [Remote State] Add async remote state deletion task running on a interval, configurable by a setting ([#13131](https://github.com/opensearch-project/OpenSearch/pull/13131)) +- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679)) +- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) +- Add getMetadataFields to MapperService ([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819)) +- [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13131](https://github.com/opensearch-project/OpenSearch/pull/13131)) +- Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776)) +- Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304)) ### Dependencies - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442)) - Bump `org.apache.commons:commons-text` from 1.11.0 to 1.12.0 ([#13557](https://github.com/opensearch-project/OpenSearch/pull/13557)) - Bump `org.hdrhistogram:HdrHistogram` from 2.1.12 to 2.2.1 ([#13556](https://github.com/opensearch-project/OpenSearch/pull/13556)) -- Bump 
`com.gradle.enterprise` from 3.17.2 to 3.17.3 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641)) +- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.4 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641), [#13753](https://github.com/opensearch-project/OpenSearch/pull/13753)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.3.6 to 3.4.0 ([#13642](https://github.com/opensearch-project/OpenSearch/pull/13642)) - Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665)) +- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752)) +- Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756)) +- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802)) +- Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817)) +- Bump `reactor` from 3.5.15 to 3.5.17 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825)) +- Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825)) +- Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839)) +- Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933)) ### Changed - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) - Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481)) +- Adds support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636)) ### Deprecated @@ -29,9 +46,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove handling of index.mapper.dynamic in AutoCreateIndex([#13067](https://github.com/opensearch-project/OpenSearch/pull/13067)) ### Fixed -- Fix negative RequestStats metric issue ([#13553](https://github.com/opensearch-project/OpenSearch/pull/13553)) - Fix get field mapping API returns 404 error in mixed cluster with multiple versions ([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624)) - Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646)) +- Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710)) +- Don't return negative scores from `multi_match` query with `cross_fields` type ([#13829](https://github.com/opensearch-project/OpenSearch/pull/13829)) +- Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885)) +- Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903)) +- Fix NPE on restore searchable snapshot 
([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911)) ### Security diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 92ef71b92da7e..bc11e7335af49 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -62,6 +62,7 @@ - [LineLint](#linelint) - [Lucene Snapshots](#lucene-snapshots) - [Flaky Tests](#flaky-tests) + - [Gradle Check Metrics Dashboard](#gradle-check-metrics-dashboard) # Developer Guide @@ -660,3 +661,7 @@ If you encounter a build/test failure in CI that is unrelated to the change in y 4. If an existing issue is found, paste a link to the known issue in a comment to your PR. 5. If no existing issue is found, open one. 6. Retry CI via the GitHub UX or by pushing an update to your PR. + +### Gradle Check Metrics Dashboard + +To get the comprehensive insights and analysis of the Gradle Check test failures, visit the [OpenSearch Gradle Check Metrics Dashboard](https://metrics.opensearch.org/_dashboards/app/dashboards#/view/e5e64d40-ed31-11ee-be99-69d1dbc75083). This dashboard is part of the [OpenSearch Metrics Project](https://github.com/opensearch-project/opensearch-metrics) initiative. The dashboard contains multiple data points that can help investigate and resolve flaky failures. Additionally, this dashboard can be used to drill down, slice, and dice the data using multiple supported filters, which further aids in troubleshooting and resolving issues. diff --git a/MAINTAINERS.md b/MAINTAINERS.md index cce92167473b6..6855281a488ca 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -14,6 +14,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | +| Jay Deng | [jed326](https://github.com/jed326) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | diff --git a/build.gradle b/build.gradle index e92f396e006f5..55b31ca816214 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,6 @@ plugins { id 'opensearch.docker-support' id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.25.0" apply false - id "org.gradle.test-retry" version "1.5.9" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } @@ -71,6 +70,13 @@ apply from: 'gradle/run.gradle' apply from: 'gradle/missing-javadoc.gradle' apply from: 'gradle/code-coverage.gradle' +// Disable unconditional publishing of build scans +develocity { + buildScan { + publishing.onlyIf { false } + } +} + // common maven publishing configuration allprojects { group = 'org.opensearch' @@ -462,9 +468,8 @@ gradle.projectsEvaluated { // test retry configuration subprojects { - apply plugin: "org.gradle.test-retry" tasks.withType(Test).configureEach { - retry { + develocity.testRetry { if (BuildParams.isCi()) { maxRetries = 3 maxFailures = 10 diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index 2ea8c2d015ecc..d0cb2da9c1dd3 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -110,7 +110,7 @@ public void execute(Task t) { if 
(BuildParams.getRuntimeJavaVersion() == JavaVersion.VERSION_1_8) { test.systemProperty("java.locale.providers", "SPI,JRE"); } else { - test.systemProperty("java.locale.providers", "SPI,COMPAT"); + test.systemProperty("java.locale.providers", "SPI,CLDR"); if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) < 0) { test.jvmArgs("--illegal-access=warn"); } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 1e097d586aded..0a36ed5e200f7 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.11.0-snapshot-fb97840 +lucene = 9.11.0-snapshot-4be6531 bundled_jdk_vendor = adoptium bundled_jdk = 21.0.3+9 @@ -7,8 +7,8 @@ bundled_jdk = 21.0.3+9 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.17.0 -jackson_databind = 2.17.0 +jackson = 2.17.1 +jackson_databind = 2.17.1 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 @@ -27,12 +27,12 @@ google_http_client = 1.44.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.109.Final +netty = 4.1.110.Final joda = 2.12.7 # project reactor -reactor_netty = 1.1.17 -reactor = 3.5.15 +reactor_netty = 1.1.19 +reactor = 3.5.17 # client dependencies httpclient5 = 5.2.1 diff --git a/client/rest/src/main/java/org/opensearch/client/Request.java b/client/rest/src/main/java/org/opensearch/client/Request.java index 441b01b0891ad..32fedee0c97bf 100644 --- a/client/rest/src/main/java/org/opensearch/client/Request.java +++ b/client/rest/src/main/java/org/opensearch/client/Request.java @@ -110,7 +110,13 @@ public void addParameters(Map<String, String> paramSource) { * will change it. */ public Map<String, String> getParameters() { - return unmodifiableMap(parameters); + if (options.getParameters().isEmpty()) { + return unmodifiableMap(parameters); + } else { + Map<String, String> combinedParameters = new HashMap<>(parameters); + combinedParameters.putAll(options.getParameters()); + return unmodifiableMap(combinedParameters); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java index 189d785faaf45..bbc1f8bc85fcb 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java @@ -40,8 +40,11 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; /** * The portion of an HTTP request to OpenSearch that can be @@ -53,18 +56,21 @@ public final class RequestOptions { */ public static final RequestOptions DEFAULT = new Builder( Collections.emptyList(), + Collections.emptyMap(), HeapBufferedResponseConsumerFactory.DEFAULT, null, null ).build(); private final List<Header>
headers; + private final Map<String, String> parameters; private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; private final WarningsHandler warningsHandler; private final RequestConfig requestConfig; private RequestOptions(Builder builder) { this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers)); + this.parameters = Collections.unmodifiableMap(new HashMap<>(builder.parameters)); this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory; this.warningsHandler = builder.warningsHandler; this.requestConfig = builder.requestConfig; @@ -74,7 +80,7 @@ private RequestOptions(Builder builder) { * Create a builder that contains these options but can be modified. */ public Builder toBuilder() { - return new Builder(headers, httpAsyncResponseConsumerFactory, warningsHandler, requestConfig); + return new Builder(headers, parameters, httpAsyncResponseConsumerFactory, warningsHandler, requestConfig); } /** @@ -84,6 +90,14 @@ public List<Header>
getHeaders() { return headers; } + /** + * Query parameters to attach to the request. Any parameters present here + * will override matching parameters in the {@link Request}, if they exist. + */ + public Map<String, String> getParameters() { + return parameters; + } + /** * The {@link HttpAsyncResponseConsumerFactory} used to create one * {@link AsyncResponseConsumer} callback per retry. Controls how the @@ -142,6 +156,12 @@ public String toString() { b.append(headers.get(h).toString()); } } + if (parameters.size() > 0) { + if (comma) b.append(", "); + comma = true; + b.append("parameters="); + b.append(parameters.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining(","))); + } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { if (comma) b.append(", "); comma = true; @@ -170,6 +190,7 @@ public boolean equals(Object obj) { RequestOptions other = (RequestOptions) obj; return headers.equals(other.headers) + && parameters.equals(other.parameters) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory) && Objects.equals(warningsHandler, other.warningsHandler); } @@ -179,7 +200,7 @@ public boolean equals(Object obj) { */ @Override public int hashCode() { - return Objects.hash(headers, httpAsyncResponseConsumerFactory, warningsHandler); + return Objects.hash(headers, parameters, httpAsyncResponseConsumerFactory, warningsHandler); } /** @@ -189,17 +210,20 @@ public int hashCode() { */ public static class Builder { private final List<Header>
headers; + private final Map<String, String> parameters; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; private WarningsHandler warningsHandler; private RequestConfig requestConfig; private Builder( List<Header>
headers, + Map<String, String> parameters, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, WarningsHandler warningsHandler, RequestConfig requestConfig ) { this.headers = new ArrayList<>(headers); + this.parameters = new HashMap<>(parameters); this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; this.warningsHandler = warningsHandler; this.requestConfig = requestConfig; @@ -226,6 +250,21 @@ public Builder addHeader(String name, String value) { return this; } + /** + * Add the provided query parameter to the request. Any parameters added here + * will override matching parameters in the {@link Request}, if they exist. + * + * @param name the query parameter name + * @param value the query parameter value + * @throws NullPointerException if {@code name} or {@code value} is null. + */ + public Builder addParameter(String name, String value) { + Objects.requireNonNull(name, "query parameter name cannot be null"); + Objects.requireNonNull(value, "query parameter value cannot be null"); + this.parameters.put(name, value); + return this; + } + /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one * {@link AsyncResponseConsumer} callback per retry. Controls how the diff --git a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java index a7f9a48c73393..06fc92559c2d3 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java @@ -39,12 +39,15 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -90,6 +93,39 @@ public void testAddHeader() { } } + public void testAddParameter() { + assertThrows( + "query parameter name cannot be null", + NullPointerException.class, + () -> randomBuilder().addParameter(null, randomAsciiLettersOfLengthBetween(3, 10)) + ); + + assertThrows( + "query parameter value cannot be null", + NullPointerException.class, + () -> randomBuilder().addParameter(randomAsciiLettersOfLengthBetween(3, 10), null) + ); + + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + int numParameters = between(0, 5); + Map<String, String> parameters = new HashMap<>(); + for (int i = 0; i < numParameters; i++) { + String name = randomAsciiAlphanumOfLengthBetween(5, 10); + String value = randomAsciiAlphanumOfLength(3); + parameters.put(name, value); + builder.addParameter(name, value); + } + RequestOptions options = builder.build(); + assertEquals(parameters, options.getParameters()); + + try { + options.getParameters().put(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)); + fail("expected failure"); + } catch (UnsupportedOperationException e) { + assertNull(e.getMessage()); + } + } + public void testSetHttpAsyncResponseConsumerFactory() { try { RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(null); @@ -145,6 +181,13 @@ static RequestOptions.Builder randomBuilder() { } } + if (randomBoolean()) { + int queryParamCount = between(1, 5); + for (int i = 0; i < queryParamCount; i++) { + builder.addParameter(randomAsciiAlphanumOfLength(3),
randomAsciiAlphanumOfLength(3)); + } + } + if (randomBoolean()) { builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); } diff --git a/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 deleted file mode 100644 index 9b906dbda1656..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6e5058ef9720623c517252d17162f845306ff3a \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..82dab5981e652 --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 @@ -0,0 +1 @@ +5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index fbd13f03af814..211b3bd55da60 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.0" + id "com.netflix.nebula.ospackage-base" version "11.9.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index 726c381db09f6..af7138569972a 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -105,13 +105,8 @@ private static String javaLocaleProviders() { SPI setting is used to allow loading custom CalendarDataProvider in jdk8 it has to be loaded from jre/lib/ext, in jdk9+ it is already within ES project and on a classpath - - Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date - parsing will break in an incompatible way for some date patterns and locales. 
- //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 - See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ - return "-Djava.locale.providers=SPI,COMPAT"; + return "-Djava.locale.providers=SPI,CLDR"; } } diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 66bf7ed6ecce8..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4ceead1b7ae4f --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 deleted file mode 100644 index c0e4bb0c56849..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7cf1ac1b60301 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/gradle.properties b/gradle.properties index 7c359ed2b652c..4e8c5b98116c1 100644 --- a/gradle.properties +++ b/gradle.properties @@ -22,7 +22,7 @@ org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m \ options.forkOptions.memoryMaximumSize=3g # Disable Gradle Enterprise Gradle plugin's test retry -systemProp.gradle.enterprise.testretry.enabled=false +systemProp.develocity.testretry.enabled.enabled=false # Disable duplicate project id detection # See https://docs.gradle.org/current/userguide/upgrading_version_6.html#duplicate_project_names_may_cause_publication_to_fail diff --git a/gradle/ide.gradle b/gradle/ide.gradle index 14d6b2982ccd0..4c4f3b07836c5 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -81,7 +81,7 @@ if (System.getProperty('idea.active') == 'true') { } runConfigurations { defaults(JUnit) { - vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' + vmParameters = '-ea -Djava.locale.providers=SPI,CLDR' if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { vmParameters += ' -Djava.security.manager=allow' } diff --git a/libs/core/licenses/jackson-core-2.17.0.jar.sha1 b/libs/core/licenses/jackson-core-2.17.0.jar.sha1 deleted file mode 100644 index 9b906dbda1656..0000000000000 --- a/libs/core/licenses/jackson-core-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6e5058ef9720623c517252d17162f845306ff3a \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.17.1.jar.sha1 b/libs/core/licenses/jackson-core-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..82dab5981e652 --- /dev/null +++ b/libs/core/licenses/jackson-core-2.17.1.jar.sha1 @@ -0,0 +1 @@ 
+5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..943a9b2fd214b --- /dev/null +++ b/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +3c2361bd633374ae3814b175cc25ccf773f67026 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 88309bc46411a..0000000000000 --- a/libs/core/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53a828e3e88f55c83979cd3df0704617cc9edb9a \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java index 976f353100c55..552945d085884 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java @@ -157,6 +157,9 @@ public static XContentBuilder builder(XContent xContent, Set<String> includes, S /** * Returns a string representation of the builder (only applicable for text based xcontent). + * Note: explicitly or implicitly (from debugger) calling toString() could cause XContentBuilder + * to close which is a side effect done by @see BytesReference#bytes(). + * Trying to write more contents after toString() will cause NPE. Use it with caution. */ @Override public String toString() { diff --git a/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 deleted file mode 100644 index 9b906dbda1656..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6e5058ef9720623c517252d17162f845306ff3a \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..82dab5981e652 --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 @@ -0,0 +1 @@ +5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 deleted file mode 100644 index 382e20d3d31c1..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6833c8573452d583e4af650a7424d547606b2501 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..ff42ed1f92cfe --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 @@ -0,0 +1 @@ +ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 deleted file mode 100644 index d117479166d17..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f10183857607fde789490d33ea46372a2d2b0c72 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..47d19067cf2a6 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 @@ -0,0 +1 @@ +89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 deleted file mode 100644 index 35242eed9b212..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57a963c6258c49febc11390082d8503f71bb15a9 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7946e994c7104 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 @@ -0,0 +1 @@ +b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java similarity index 99% rename from modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java rename to modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java index bfc184cff0566..02be0990eb136 100644 --- a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java @@ -65,7 +65,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(TieredSpilloverCachePlugin.class, MockDiskCachePlugin.class); } - private Settings defaultSettings(String onHeapCacheSizeInBytesOrPecentage) { + static Settings defaultSettings(String onHeapCacheSizeInBytesOrPercentage) { return Settings.builder() .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( @@ -88,7 +88,7 @@ private Settings defaultSettings(String onHeapCacheSizeInBytesOrPecentage) { OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) .get(MAXIMUM_SIZE_IN_BYTES_KEY) .getKey(), - onHeapCacheSizeInBytesOrPecentage + onHeapCacheSizeInBytesOrPercentage ) .build(); } diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java new file mode 100644 index 0000000000000..537caccbac652 --- /dev/null +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java @@ -0,0 +1,501 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.service.NodeCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.indices.IndicesRequestCache; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.hamcrest.OpenSearchAssertions; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +// Use a single data node to simplify accessing cache stats across different shards. +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class TieredSpilloverCacheStatsIT extends OpenSearchIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(TieredSpilloverCachePlugin.class, TieredSpilloverCacheIT.MockDiskCachePlugin.class); + } + + private final String HEAP_CACHE_SIZE_STRING = "10000B"; + private final int HEAP_CACHE_SIZE = 10_000; + private final String index1Name = "index1"; + private final String index2Name = "index2"; + + /** + * Test aggregating by indices + */ + public void testIndicesLevelAggregation() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + ImmutableCacheStatsHolder allLevelsStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, TIER_DIMENSION_NAME) + ); + ImmutableCacheStatsHolder indicesOnlyStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME) + ); + + // Get values for indices alone, assert these match for statsHolders that have additional dimensions vs.
a statsHolder that only has + // the indices dimension + ImmutableCacheStats index1ExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex1") + values.get("hitsOnDiskIndex1"), + values.get("itemsOnDiskIndex1AfterTest") + values.get("itemsOnHeapIndex1AfterTest"), + 0, + (values.get("itemsOnDiskIndex1AfterTest") + values.get("itemsOnHeapIndex1AfterTest")) * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex1AfterTest") + values.get("itemsOnHeapIndex1AfterTest") + ) + ); + ImmutableCacheStats index2ExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex2") + values.get("hitsOnDiskIndex2"), + values.get("itemsOnDiskIndex2AfterTest") + values.get("itemsOnHeapIndex2AfterTest"), + 0, + (values.get("itemsOnDiskIndex2AfterTest") + values.get("itemsOnHeapIndex2AfterTest")) * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex2AfterTest") + values.get("itemsOnHeapIndex2AfterTest") + ) + ); + + for (ImmutableCacheStatsHolder statsHolder : List.of(allLevelsStatsHolder, indicesOnlyStatsHolder)) { + assertEquals(index1ExpectedStats, statsHolder.getStatsForDimensionValues(List.of(index1Name))); + assertEquals(index2ExpectedStats, statsHolder.getStatsForDimensionValues(List.of(index2Name))); + } + } + + /** + * Test aggregating by indices and tier + */ + public void testIndicesAndTierLevelAggregation() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + ImmutableCacheStatsHolder allLevelsStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, TIER_DIMENSION_NAME) + ); + + // Get values broken down by indices+tiers + ImmutableCacheStats index1HeapExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex1"), + values.get("itemsOnHeapIndex1AfterTest") + values.get("itemsOnDiskIndex1AfterTest") + values.get("hitsOnDiskIndex1"), + values.get("itemsOnDiskIndex1AfterTest"), + values.get("itemsOnHeapIndex1AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnHeapIndex1AfterTest") + ) + ); + assertEquals( + index1HeapExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index1Name, TIER_DIMENSION_VALUE_ON_HEAP)) + ); + + ImmutableCacheStats index2HeapExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex2"), + values.get("itemsOnHeapIndex2AfterTest") + values.get("itemsOnDiskIndex2AfterTest") + values.get("hitsOnDiskIndex2"), + values.get("itemsOnDiskIndex2AfterTest"), + values.get("itemsOnHeapIndex2AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnHeapIndex2AfterTest") + ) + ); + assertEquals( + index2HeapExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index2Name, TIER_DIMENSION_VALUE_ON_HEAP)) + ); + + ImmutableCacheStats index1DiskExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnDiskIndex1"), + values.get("itemsOnHeapIndex1AfterTest") + values.get("itemsOnDiskIndex1AfterTest"), + 0, + values.get("itemsOnDiskIndex1AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex1AfterTest") + ) + ); + assertEquals( +
index1DiskExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index1Name, TIER_DIMENSION_VALUE_DISK)) + ); + + ImmutableCacheStats index2DiskExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnDiskIndex2"), + values.get("itemsOnHeapIndex2AfterTest") + values.get("itemsOnDiskIndex2AfterTest"), + 0, + values.get("itemsOnDiskIndex2AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex2AfterTest") + ) + ); + assertEquals( + index2DiskExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index2Name, TIER_DIMENSION_VALUE_DISK)) + ); + } + + /** + * Test aggregating by tier only + */ + public void testTierLevelAggregation() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + // Get values for tiers alone and check they add correctly across indices + ImmutableCacheStatsHolder tiersOnlyStatsHolder = getNodeCacheStatsResult(client, List.of(TIER_DIMENSION_NAME)); + ImmutableCacheStats totalHeapExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex1") + values.get("hitsOnHeapIndex2"), + values.get("itemsOnHeapAfterTest") + values.get("itemsOnDiskAfterTest") + values.get("hitsOnDiskIndex1") + values.get( + "hitsOnDiskIndex2" + ), + values.get("itemsOnDiskAfterTest"), + values.get("itemsOnHeapAfterTest") * values.get("singleSearchSize"), + values.get("itemsOnHeapAfterTest") + ) + ); + ImmutableCacheStats heapStats = tiersOnlyStatsHolder.getStatsForDimensionValues(List.of(TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(totalHeapExpectedStats, heapStats); + ImmutableCacheStats totalDiskExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnDiskIndex1") + values.get("hitsOnDiskIndex2"), + values.get("itemsOnHeapAfterTest") + values.get("itemsOnDiskAfterTest"), + 0, + values.get("itemsOnDiskAfterTest") * values.get("singleSearchSize"), + values.get("itemsOnDiskAfterTest") + ) + ); + ImmutableCacheStats diskStats = tiersOnlyStatsHolder.getStatsForDimensionValues(List.of(TIER_DIMENSION_VALUE_DISK)); + assertEquals(totalDiskExpectedStats, diskStats); + } + + public void testInvalidLevelsAreIgnored() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + ImmutableCacheStatsHolder allLevelsStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, TIER_DIMENSION_NAME) + ); + ImmutableCacheStatsHolder indicesOnlyStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME) + ); + + // Test invalid levels are ignored and permuting the order of levels in the request doesn't matter + + // This should be equivalent to just "indices" + ImmutableCacheStatsHolder indicesEquivalentStatsHolder = getNodeCacheStatsResult( + client, +
List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, "unrecognized_dimension") + ); + assertEquals(indicesOnlyStatsHolder, indicesEquivalentStatsHolder); + + // This should be equivalent to "indices", "tier" + ImmutableCacheStatsHolder indicesAndTierEquivalentStatsHolder = getNodeCacheStatsResult( + client, + List.of(TIER_DIMENSION_NAME, "unrecognized_dimension_1", IndicesRequestCache.INDEX_DIMENSION_NAME, "unrecognized_dimension_2") + ); + assertEquals(allLevelsStatsHolder, indicesAndTierEquivalentStatsHolder); + + // This should be equivalent to no levels passed in + ImmutableCacheStatsHolder noLevelsEquivalentStatsHolder = getNodeCacheStatsResult( + client, + List.of("unrecognized_dimension_1", "unrecognized_dimension_2") + ); + ImmutableCacheStatsHolder noLevelsStatsHolder = getNodeCacheStatsResult(client, List.of()); + assertEquals(noLevelsStatsHolder, noLevelsEquivalentStatsHolder); + } + + /** + * Check the new stats API returns the same values as the old stats API. + */ + public void testStatsMatchOldApi() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + String index = "index"; + Client client = client(); + startIndex(client, index); + + // First search one time to see how big a single value will be + searchIndex(client, index, 0); + // get total stats + long singleSearchSize = getTotalStats(client).getSizeInBytes(); + // Select numbers so we get some values on both heap and disk + int itemsOnHeap = HEAP_CACHE_SIZE / (int) singleSearchSize; + int itemsOnDisk = 1 + randomInt(30); // The first one we search (to get the size) always goes to disk + int expectedEntries = itemsOnHeap + itemsOnDisk; + + for (int i = 1; i < expectedEntries; i++) { + // Cause misses + searchIndex(client, index, i); + } + int expectedMisses = itemsOnHeap + itemsOnDisk; + + // Cause some hits + int expectedHits = randomIntBetween(itemsOnHeap, expectedEntries); // Select it so some hits come from both tiers + for (int i = 0; i < expectedHits; i++) { + searchIndex(client, index, i); + } + + ImmutableCacheStats totalStats = getNodeCacheStatsResult(client, List.of()).getTotalStats(); + + // Check the new stats API values are as expected + assertEquals( + new ImmutableCacheStats(expectedHits, expectedMisses, 0, expectedEntries * singleSearchSize, expectedEntries), + totalStats + ); + // Now check the new stats API values for the cache as a whole match the old stats API values + RequestCacheStats oldAPIStats = client.admin() + .indices() + .prepareStats(index) + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache(); + assertEquals(oldAPIStats.getHitCount(), totalStats.getHits()); + assertEquals(oldAPIStats.getMissCount(), totalStats.getMisses()); + assertEquals(oldAPIStats.getEvictions(), totalStats.getEvictions()); + assertEquals(oldAPIStats.getMemorySizeInBytes(), totalStats.getSizeInBytes()); + } + + private void startIndex(Client client, String indexName) throws InterruptedException { + assertAcked( + client.admin() + .indices() + .prepareCreate(indexName) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + 
.get() + ); + indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello")); + ensureSearchable(indexName); + } + + private Map setupCacheForAggregationTests(Client client) throws Exception { + startIndex(client, index1Name); + startIndex(client, index2Name); + + // First search one time to see how big a single value will be + searchIndex(client, index1Name, 0); + // get total stats + long singleSearchSize = getTotalStats(client).getSizeInBytes(); + int itemsOnHeapAfterTest = HEAP_CACHE_SIZE / (int) singleSearchSize; // As the heap tier evicts, the items on it after the test will + // be the same as its max capacity + int itemsOnDiskAfterTest = 1 + randomInt(30); // The first one we search (to get the size) always goes to disk + + // Put some values on heap and disk for each index + int itemsOnHeapIndex1AfterTest = randomInt(itemsOnHeapAfterTest); + int itemsOnHeapIndex2AfterTest = itemsOnHeapAfterTest - itemsOnHeapIndex1AfterTest; + int itemsOnDiskIndex1AfterTest = 1 + randomInt(itemsOnDiskAfterTest - 1); + // The first one we search (to get the size) always goes to disk + int itemsOnDiskIndex2AfterTest = itemsOnDiskAfterTest - itemsOnDiskIndex1AfterTest; + int hitsOnHeapIndex1 = randomInt(itemsOnHeapIndex1AfterTest); + int hitsOnDiskIndex1 = randomInt(itemsOnDiskIndex1AfterTest); + int hitsOnHeapIndex2 = randomInt(itemsOnHeapIndex2AfterTest); + int hitsOnDiskIndex2 = randomInt(itemsOnDiskIndex2AfterTest); + + // Put these values into a map so tests can know what to expect in stats responses + Map expectedValues = new HashMap<>(); + expectedValues.put("itemsOnHeapIndex1AfterTest", itemsOnHeapIndex1AfterTest); + expectedValues.put("itemsOnHeapIndex2AfterTest", itemsOnHeapIndex2AfterTest); + expectedValues.put("itemsOnDiskIndex1AfterTest", itemsOnDiskIndex1AfterTest); + expectedValues.put("itemsOnDiskIndex2AfterTest", itemsOnDiskIndex2AfterTest); + expectedValues.put("hitsOnHeapIndex1", hitsOnHeapIndex1); + expectedValues.put("hitsOnDiskIndex1", hitsOnDiskIndex1); + expectedValues.put("hitsOnHeapIndex2", hitsOnHeapIndex2); + expectedValues.put("hitsOnDiskIndex2", hitsOnDiskIndex2); + expectedValues.put("singleSearchSize", (int) singleSearchSize); + expectedValues.put("itemsOnDiskAfterTest", itemsOnDiskAfterTest); + expectedValues.put("itemsOnHeapAfterTest", itemsOnHeapAfterTest); // Can only pass 10 keys in Map.of() constructor + + // The earliest items (0 - itemsOnDiskAfterTest) are the ones which get evicted to disk + for (int i = 1; i < itemsOnDiskIndex1AfterTest; i++) { // Start at 1 as 0 has already been searched + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskIndex1AfterTest; i < itemsOnDiskIndex1AfterTest + itemsOnDiskIndex2AfterTest; i++) { + searchIndex(client, index2Name, i); + } + // The remaining items stay on heap + for (int i = itemsOnDiskAfterTest; i < itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest; i++) { + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest; i < itemsOnDiskAfterTest + itemsOnHeapAfterTest; i++) { + searchIndex(client, index2Name, i); + } + + // Get some hits on all combinations of indices and tiers + for (int i = itemsOnDiskAfterTest; i < itemsOnDiskAfterTest + hitsOnHeapIndex1; i++) { + // heap hits for index 1 + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest; i < itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest + + hitsOnHeapIndex2; i++) { + // heap hits for index 2 + searchIndex(client, index2Name, i); + } + 
for (int i = 0; i < hitsOnDiskIndex1; i++) { + // disk hits for index 1 + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskIndex1AfterTest; i < itemsOnDiskIndex1AfterTest + hitsOnDiskIndex2; i++) { + // disk hits for index 2 + searchIndex(client, index2Name, i); + } + return expectedValues; + } + + private ImmutableCacheStats returnNullIfAllZero(ImmutableCacheStats expectedStats) { + // If the randomly chosen numbers are such that the expected stats would be 0, we actually have not interacted with the cache for + // this index. + // In this case, we expect the stats holder to have no stats for this node, and therefore we should get null from + // statsHolder.getStatsForDimensionValues(). + // We will not see it in the XContent response. + if (expectedStats.equals(new ImmutableCacheStats(0, 0, 0, 0, 0))) { + return null; + } + return expectedStats; + } + + // Duplicated from CacheStatsAPIIndicesRequestCacheIT.java, as we can't add a dependency on server.internalClusterTest + + private SearchResponse searchIndex(Client client, String index, int searchSuffix) { + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k", "hello" + padWithZeros(4, searchSuffix))) + // pad with zeros so request 0 and request 10 have the same size ("0000" and "0010" instead of "0" and "10") + .get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + return resp; + } + + private String padWithZeros(int finalLength, int inputValue) { + // Avoid forbidden API String.format() + String input = String.valueOf(inputValue); + if (input.length() >= finalLength) { + return input; + } + StringBuilder sb = new StringBuilder(); + while (sb.length() < finalLength - input.length()) { + sb.append('0'); + } + sb.append(input); + return sb.toString(); + } + + private ImmutableCacheStats getTotalStats(Client client) throws IOException { + ImmutableCacheStatsHolder statsHolder = getNodeCacheStatsResult(client, List.of()); + return statsHolder.getStatsForDimensionValues(List.of()); + } + + private static ImmutableCacheStatsHolder getNodeCacheStatsResult(Client client, List aggregationLevels) throws IOException { + CommonStatsFlags statsFlags = new CommonStatsFlags(); + statsFlags.includeAllCacheTypes(); + String[] flagsLevels; + if (aggregationLevels == null) { + flagsLevels = null; + } else { + flagsLevels = aggregationLevels.toArray(new String[0]); + } + statsFlags.setLevels(flagsLevels); + + NodesStatsResponse nodeStatsResponse = client.admin() + .cluster() + .prepareNodesStats("data:true") + .addMetric(NodesStatsRequest.Metric.CACHE_STATS.metricName()) + .setIndices(statsFlags) + .get(); + // Can always get the first data node as there's only one in this test suite + assertEquals(1, nodeStatsResponse.getNodes().size()); + NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); + return ncs.getStatsByCache(CacheType.INDICES_REQUEST_CACHE); + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 9942651ccdd67..63cdbca101f2a 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -119,6 +119,8 @@ public class TieredSpilloverCache implements ICache { .setValueType(builder.cacheConfig.getValueType()) 
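// Editor's note: the setKeySerializer/setValueSerializer calls added just below propagate the tiered cache's serializers into the inner tier's CacheConfig; as the MockDiskCache change later in this patch notes, a real disk tier such as the ehcache plugin cannot be constructed without them.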
.setSettings(builder.cacheConfig.getSettings()) .setWeigher(builder.cacheConfig.getWeigher()) + .setKeySerializer(builder.cacheConfig.getKeySerializer()) + .setValueSerializer(builder.cacheConfig.getValueSerializer()) .setDimensionNames(builder.cacheConfig.getDimensionNames()) .setStatsTrackingEnabled(false) .build(), @@ -325,6 +327,7 @@ private Function, Tuple> getValueFromTieredCache(boolean void handleRemovalFromHeapTier(RemovalNotification, V> notification) { ICacheKey key = notification.getKey(); boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); + boolean countEvictionTowardsTotal = false; // Don't count this eviction towards the cache's total if it ends up in the disk tier if (caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats @@ -334,21 +337,28 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification // If the value is not going to the disk cache, send this notification to the TSC's removal listener // as the value is leaving the TSC entirely removalListener.onRemoval(notification); + countEvictionTowardsTotal = true; } - updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue()); + updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue(), countEvictionTowardsTotal); } void handleRemovalFromDiskTier(RemovalNotification, V> notification) { // Values removed from the disk tier leave the TSC entirely removalListener.onRemoval(notification); boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); - updateStatsOnRemoval(TIER_DIMENSION_VALUE_DISK, wasEvicted, notification.getKey(), notification.getValue()); + updateStatsOnRemoval(TIER_DIMENSION_VALUE_DISK, wasEvicted, notification.getKey(), notification.getValue(), true); } - void updateStatsOnRemoval(String removedFromTierValue, boolean wasEvicted, ICacheKey key, V value) { + void updateStatsOnRemoval( + String removedFromTierValue, + boolean wasEvicted, + ICacheKey key, + V value, + boolean countEvictionTowardsTotal + ) { List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, removedFromTierValue); if (wasEvicted) { - statsHolder.incrementEvictions(dimensionValues); + statsHolder.incrementEvictions(dimensionValues, countEvictionTowardsTotal); } statsHolder.decrementItems(dimensionValues); statsHolder.decrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java index d17059e8dee94..b40724430454b 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -105,20 +105,29 @@ public void incrementMisses(List dimensionValues) { internalIncrement(dimensionValues, missIncrementer, true); } + /** + * This method shouldn't be used in this class. Instead, use incrementEvictions(dimensionValues, includeInTotal) + * which specifies whether the eviction should be included in the cache's total evictions, or if it should + * just count towards that tier's evictions. 
+ * @param dimensionValues The dimension values + */ @Override public void incrementEvictions(List<String> dimensionValues) { - final String tierValue = validateTierDimensionValue(dimensionValues); + throw new UnsupportedOperationException( + "TieredSpilloverCacheStatsHolder must specify whether to include an eviction in the total cache stats. Use incrementEvictions(List<String> dimensionValues, boolean includeInTotal)" + ); + } - // If the disk tier is present, only evictions from the disk tier should be included in total values. + /** + * Increment evictions for this set of dimension values. + * @param dimensionValues The dimension values + * @param includeInTotal Whether to include this eviction in the total for the whole cache's evictions + */ + public void incrementEvictions(List<String> dimensionValues, boolean includeInTotal) { + validateTierDimensionValue(dimensionValues); + // If we count this eviction towards the total, we should increment all ancestor nodes. If not, only increment the leaf node. Consumer evictionsIncrementer = (node) -> { - if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) && diskCacheEnabled) { - // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent - // nodes - if (node.isAtLowestLevel()) { - node.incrementEvictions(); - } - } else { - // If disk tier, or on-heap tier with a disabled disk tier, increment the leaf node and its parents + if (includeInTotal || node.isAtLowestLevel()) { node.incrementEvictions(); } }; diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index 2058faa5181b1..69e2060f7ea2f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -141,6 +141,10 @@ public MockDiskCacheFactory(long delay, int maxSize, boolean statsTrackingEnable @Override @SuppressWarnings({ "unchecked" }) public ICache create(CacheConfig config, CacheType cacheType, Map cacheFactories) { + // As we can't directly run integration tests combining the tiered cache and ehcache, check that we receive non-null serializers, as an ehcache disk + // cache would require.
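// Editor's note (minimal sketch, not part of the patch; Node is a hypothetical stats-tree node exposing the isAtLowestLevel() and incrementEvictions() methods seen above): the two-argument incrementEvictions reduces to one rule per node on the path from the tree's root down to the (index..., tier) leaf.
void incrementEvictionsSketch(List<Node> pathFromRootToLeaf, boolean includeInTotal) {
    for (Node node : pathFromRootToLeaf) {
        // A spilled-to-disk heap eviction (includeInTotal == false) touches only the leaf,
        // so it shows up in the on-heap tier's stats without inflating the cache-wide total.
        if (includeInTotal || node.isAtLowestLevel()) {
            node.incrementEvictions();
        }
    }
}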
+ assert config.getKeySerializer() != null; + assert config.getValueSerializer() != null; return new Builder().setKeySerializer((Serializer) config.getKeySerializer()) .setValueSerializer((Serializer) config.getValueSerializer()) .setMaxSize(maxSize) diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 6d5ee91326338..54b15f236a418 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -16,6 +16,7 @@ import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.serializer.Serializer; import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; @@ -32,6 +33,8 @@ import org.junit.Before; import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -166,6 +169,8 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception .setKeyType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken @@ -318,6 +323,8 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { .setKeyType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setDimensionNames(dimensionNames) .setSettings( Settings.builder() @@ -830,6 +837,8 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio .setKeyType(String.class) .setWeigher((k, v) -> 150) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings( Settings.builder() .put( @@ -917,14 +926,14 @@ public void testDiskTierPolicies() throws Exception { MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( keyValueSize, - 100, + keyValueSize * 100, removalListener, Settings.builder() .put( OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) .get(MAXIMUM_SIZE_IN_BYTES_KEY) .getKey(), - onHeapCacheSize * 50 + "b" + onHeapCacheSize * keyValueSize + "b" ) .build(), 0, @@ -946,6 +955,7 @@ public void testDiskTierPolicies() throws Exception { LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(keyValuePairs); + int expectedEvictions = 0; for (String key : keyValuePairs.keySet()) { ICacheKey iCacheKey = getICacheKey(key); Boolean expectedOutput = expectedOutputs.get(key); @@ -958,8 +968,15 @@ public void testDiskTierPolicies() throws Exception { } else { // Should miss as heap tier size = 0 and the policy rejected it assertNull(result); + 
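// Editor's note: an entry evicted from the heap tier but rejected by the disk tier's policy leaves the cache entirely and therefore counts toward the cache-wide eviction total, whereas an entry that spills over to disk only counts against the heap tier's own eviction stat. That is why expectedEvictions below advances only on policy rejections, never on spillovers.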
expectedEvictions++; } } + + // We expect values that were evicted from the heap tier and not allowed into the disk tier by the policy + // to count towards total evictions + assertEquals(keyValuePairs.size(), getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); // Disk tier is large enough for no evictions + assertEquals(expectedEvictions, getTotalStatsSnapshot(tieredSpilloverCache).getEvictions()); } public void testTookTimePolicyFromFactory() throws Exception { @@ -1014,6 +1031,8 @@ public void testTookTimePolicyFromFactory() throws Exception { .setKeyType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings(settings) .setMaxSizeInBytes(onHeapCacheSize * keyValueSize) .setDimensionNames(dimensionNames) @@ -1415,6 +1434,8 @@ private TieredSpilloverCache intializeTieredSpilloverCache( .setSettings(settings) .setDimensionNames(dimensionNames) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings( Settings.builder() .put( @@ -1479,4 +1500,31 @@ private ImmutableCacheStats getStatsSnapshotForTier(TieredSpilloverCache t } return snapshot; } + + private ImmutableCacheStats getTotalStatsSnapshot(TieredSpilloverCache tsc) throws IOException { + ImmutableCacheStatsHolder cacheStats = tsc.stats(new String[0]); + return cacheStats.getStatsForDimensionValues(List.of()); + } + + // Duplicated here from EhcacheDiskCacheTests.java, we can't add a dependency on that plugin + static class StringSerializer implements Serializer { + private final Charset charset = StandardCharsets.UTF_8; + + @Override + public byte[] serialize(String object) { + return object.getBytes(charset); + } + + @Override + public String deserialize(byte[] bytes) { + if (bytes == null) { + return null; + } + return new String(bytes, charset); + } + + public boolean equals(String object, byte[] bytes) { + return object.equals(deserialize(bytes)); + } + } } diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 66bf7ed6ecce8..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4ceead1b7ae4f --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 deleted file mode 100644 index c0e4bb0c56849..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7cf1ac1b60301 --- /dev/null +++ 
b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..ee00419f52066 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +8752daf173a642ae02e081cc0398f2ce59278200 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-fb97840.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index f4da6e39aeeb8..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab914b48665f484315b78e4b6787aa42f5966bb6 \ No newline at end of file diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java index 0d498e16154c8..3d48e96117a1c 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java @@ -354,6 +354,9 @@ public void testInvalidAssignment() { assertEquals(iae.getMessage(), "invalid assignment: cannot assign a value to addition operation [+]"); iae = expectScriptThrows(IllegalArgumentException.class, () -> exec("Double.x() = 1;")); assertEquals(iae.getMessage(), "invalid assignment: cannot assign a value to method call [x/0]"); + + expectScriptThrows(UnsupportedOperationException.class, () -> exec("params['modifyingParamsMap'] = 2;")); + expectScriptThrows(UnsupportedOperationException.class, () -> exec("params.modifyingParamsMap = 2;")); } public void testCannotResolveSymbol() { diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml index 3d6db1b781caf..fdbc6de37e3ea 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml @@ -13,3 +13,50 @@ - match: { error.root_cause.0.position.offset: 13 } - match: { error.root_cause.0.position.start: 0 } - match: { error.root_cause.0.position.end: 38 } + +--- +"Test modifying params map from script leads to exception": + - skip: + features: "node_selector" + + - do: + put_script: + id: "except" + body: {"script": {"lang": "painless", "source": "params.that = 3"}} + + - do: + indices.create: + index: "test" + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + this: + type: "integer" + that: + type: "integer" + + - do: + index: + index: "test" + id: 1 + body: {"this": 1, "that": 2} + + - do: + catch: /unsupported_operation_exception/ + node_selector: + version: "2.15.0 - " + update: + index: "test" + id: 1 + body: + script: + id: "except" + params: {"this": 2} + + - match: { error.caused_by.position.offset: 6 } + - match: { error.caused_by.position.start: 0 } + - match: { error.caused_by.position.end: 15 } diff --git 
a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 6536d474f5abc..27cef3f7d7251 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -47,6 +47,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.junit.annotations.TestIssueLogging; import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; import org.junit.Before; @@ -63,6 +64,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; +@TestIssueLogging(value = "_root:TRACE", issueUrl = "https://github.com/opensearch-project/OpenSearch/issues/9117") public class RepositoryURLClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase { public RepositoryURLClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 83c4db80b7798..a8a165df637a2 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -235,11 +235,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..04338d8933590 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +12630ff9c56e2a372ba57f519c579ff9e728208a \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 594733c11402c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-fb97840.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -f9cd7bec33c8cf3b891976cb674ffe9c97f8747f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..b8da0dacfe9f1 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +752bfc61c7829be6c27d9c1764250196e2c6b06b \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index c46e06b8c87e4..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c244a56bf7cd171a19379c96f1d20c477a34578d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..b58adc03938f3 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +5ca56d42b24498a226cf91f48b94e010b6af5fe2 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index a79c34a127920..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da26df43f2b0d7c2dfecbf208cae0772a5e382ca \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..dea962647d995 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +8eb59a89aa8984457798ccffb8e97e5351bebc1f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index f2b08020be1ad..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f752ffa5ee4697b04643214236138f3defdee2f4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..1259b95a789a5 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +851c1bd99eaef368e84335853dd448e4f56cdbc8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 969a05905eaf0..0000000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73fe44fe755aef72e7293b2ffdd934beb631429d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..8c0d8fd278b89 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +63647085d41ae231733580c20a498ce7c9134ce5 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index fdf0bd39e217e..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2b48831b25e1c7e8f683a63d1505c2d133256d3 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..0eb1fb5f2b31f --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +a3ba7dd03b1df9efed08eb544689d51d2be22aa5 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 0042415700453..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -757f8b29f103f82b6fb6948634e93dd497c9d7a8 \ No newline at end of file diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle index 4fc5e44f58c3a..5747624e2fb69 100644 --- a/plugins/cache-ehcache/build.gradle +++ b/plugins/cache-ehcache/build.gradle @@ -24,6 +24,7 @@ versions << [ dependencies { api "org.ehcache:ehcache:${versions.ehcache}" + api "org.slf4j:slf4j-api:${versions.slf4j}" } thirdPartyAudit { @@ -78,10 +79,9 @@ thirdPartyAudit { 'org.osgi.framework.BundleActivator', 'org.osgi.framework.BundleContext', 'org.osgi.framework.ServiceReference', - 'org.slf4j.Logger', - 'org.slf4j.LoggerFactory', - 'org.slf4j.Marker', - 'org.slf4j.event.Level' + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder' ) } @@ -90,13 +90,3 @@ tasks.named("bundlePlugin").configure { into 'config' } } - -test { - // TODO: Adding permission in plugin-security.policy doesn't seem to work. - systemProperty 'tests.security.manager', 'false' -} - -internalClusterTest { - // TODO: Remove this later once we have a way. 
- systemProperty 'tests.security.manager', 'false' -} diff --git a/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/cache-ehcache/licenses/slf4j-api-LICENSE.txt b/plugins/cache-ehcache/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..54512cc08d16b --- /dev/null +++ b/plugins/cache-ehcache/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2022 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/plugins/cache-ehcache/licenses/slf4j-api-NOTICE.txt b/plugins/cache-ehcache/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 9a4dce1067b61..b4c62fbf85cb8 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -42,6 +42,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.time.Duration; import java.util.Arrays; import java.util.Iterator; @@ -175,57 +177,60 @@ private EhcacheDiskCache(Builder builder) { @SuppressWarnings({ "rawtypes" }) private Cache buildCache(Duration expireAfterAccess, Builder builder) { - try { - return this.cacheManager.createCache( - this.diskCacheAlias, - CacheConfigurationBuilder.newCacheConfigurationBuilder( - ICacheKey.class, - ByteArrayWrapper.class, - ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) - ).withExpiry(new ExpiryPolicy<>() { - @Override - public Duration getExpiryForCreation(ICacheKey key, ByteArrayWrapper value) { - return INFINITE; - } - - @Override - public Duration getExpiryForAccess(ICacheKey key, Supplier value) { - return expireAfterAccess; - } - - @Override - public Duration getExpiryForUpdate( - ICacheKey key, - Supplier oldValue, - ByteArrayWrapper newValue - ) { - return INFINITE; - } - }) - .withService(getListenerConfiguration(builder)) - .withService( - new 
OffHeapDiskStoreConfiguration( - this.threadPoolAlias, - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_CONCURRENCY_KEY) - .get(settings), - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + // Creating the cache requires permissions specified in plugin-security.policy + return AccessController.doPrivileged((PrivilegedAction>) () -> { + try { + return this.cacheManager.createCache( + this.diskCacheAlias, + CacheConfigurationBuilder.newCacheConfigurationBuilder( + ICacheKey.class, + ByteArrayWrapper.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration getExpiryForCreation(ICacheKey key, ByteArrayWrapper value) { + return INFINITE; + } + + @Override + public Duration getExpiryForAccess(ICacheKey key, Supplier value) { + return expireAfterAccess; + } + + @Override + public Duration getExpiryForUpdate( + ICacheKey key, + Supplier oldValue, + ByteArrayWrapper newValue + ) { + return INFINITE; + } + }) + .withService(getListenerConfiguration(builder)) + .withService( + new OffHeapDiskStoreConfiguration( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_CONCURRENCY_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + ) ) - ) - .withKeySerializer(new KeySerializerWrapper(keySerializer)) - .withValueSerializer(new ByteArrayWrapperSerializer()) + .withKeySerializer(new KeySerializerWrapper(keySerializer)) + .withValueSerializer(new ByteArrayWrapperSerializer()) // We pass ByteArrayWrapperSerializer as ehcache's value serializer. If V is an interface, and we pass its // serializer directly to ehcache, ehcache requires the classes match exactly before/after serialization. // This is not always feasible or necessary, like for BytesReference. So, we handle the value serialization // before V hits ehcache. - ); - } catch (IllegalArgumentException ex) { - logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); - throw ex; - } catch (IllegalStateException ex) { - logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); - throw ex; - } + ); + } catch (IllegalArgumentException ex) { + logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); + throw ex; + } catch (IllegalStateException ex) { + logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); + throw ex; + } + }); } private CacheEventListenerConfigurationBuilder getListenerConfiguration(Builder builder) { @@ -252,25 +257,28 @@ Map, CompletableFuture, V>>> getCompletableFutur @SuppressForbidden(reason = "Ehcache uses File.io") private PersistentCacheManager buildCacheManager() { // In case we use multiple ehCaches, we can define this cache manager at a global level. 
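// Editor's note (minimal sketch of the idiom this hunk introduces, using only the standard java.security API and the ehcache builder already shown in this file): wrapping the ehcache calls in doPrivileged lets the permission check stop at this plugin's ProtectionDomain, which plugin-security.policy grants the needed RuntimePermissions, instead of requiring them of every caller up the stack.
CacheManager manager = AccessController.doPrivileged(
    (PrivilegedAction<CacheManager>) () -> CacheManagerBuilder.newCacheManagerBuilder().build(true)
);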
- return CacheManagerBuilder.newCacheManagerBuilder() - .with(CacheManagerBuilder.persistence(new File(storagePath))) - - .using( - PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() - .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks - // like event listeners - .pool( - this.threadPoolAlias, - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_MIN_THREADS_KEY) - .get(settings), - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_MAXIMUM_THREADS_KEY) - .get(settings) - ) - .build() - ) - .build(true); + // Creating the cache manager also requires permissions specified in plugin-security.policy + return AccessController.doPrivileged((PrivilegedAction) () -> { + return CacheManagerBuilder.newCacheManagerBuilder() + .with(CacheManagerBuilder.persistence(new File(storagePath))) + + .using( + PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() + .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks + // like event listeners + .pool( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MIN_THREADS_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MAXIMUM_THREADS_KEY) + .get(settings) + ) + .build() + ) + .build(true); + }); } @Override diff --git a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy index 40007eea62dba..85c82824d5d65 100644 --- a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy @@ -9,5 +9,8 @@ grant { permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; permission java.lang.RuntimePermission "createClassLoader"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 66bf7ed6ecce8..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4ceead1b7ae4f --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 deleted file mode 100644 index c0e4bb0c56849..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7cf1ac1b60301 --- 
/dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 66bf7ed6ecce8..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4ceead1b7ae4f --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 deleted file mode 100644 index c0e4bb0c56849..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7cf1ac1b60301 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 4749aa911886d..d631855013527 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -89,7 +89,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.2.0' + api 'org.apache.xmlbeans:xmlbeans:5.2.1' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 deleted file mode 100644 index f34274d593697..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6198ac997b3f234f2b5393fa415f78fac2e06510 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 new file mode 100644 index 0000000000000..eaab556163e5c --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 @@ -0,0 +1 @@ +e16ddf17fe181c202b097e0dcc0ee2fed91cb7da \ No newline at end of file diff --git a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java index 7f4a9b8ca0ac7..a022b8b9bf8b0 100644 --- a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java +++ b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java @@ -32,54 +32,67 @@ package org.opensearch.ingest.attachment; +import org.apache.commons.codec.digest.DigestUtils; import 
org.apache.lucene.tests.util.LuceneTestCase.SuppressFileSystems; import org.apache.lucene.tests.util.TestUtil; import org.apache.tika.metadata.Metadata; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Map; /** - * Evil test-coverage cheat, we parse a bunch of docs from tika - * so that we have a nice grab-bag variety, and assert some content - * comes back and no exception. + * Parse sample tika documents and assert the contents have not changed according to previously recorded checksums. + * Uncaught changes to tika parsing could pose bwc issues. + * Note: In some cases tika will access a user's locale to inform the parsing of a file. + * The checksums of these files are left empty, and we only validate that parsed content is not null. */ @SuppressFileSystems("ExtrasFS") // don't try to parse extraN public class TikaDocTests extends OpenSearchTestCase { - /** some test files from tika test suite, zipped up */ + /** some test files from the apache tika unit test suite with accompanying sha1 checksums */ static final String TIKA_FILES = "/org/opensearch/ingest/attachment/test/tika-files/"; + static final String TIKA_CHECKSUMS = "/org/opensearch/ingest/attachment/test/.checksums"; - public void testFiles() throws Exception { - Path tmp = createTempDir(); - logger.debug("unzipping all tika sample files"); - try (DirectoryStream stream = Files.newDirectoryStream(PathUtils.get(getClass().getResource(TIKA_FILES).toURI()))) { - for (Path doc : stream) { - String filename = doc.getFileName().toString(); - TestUtil.unzip(getClass().getResourceAsStream(TIKA_FILES + filename), tmp); - } - } + public void testParseSamples() throws Exception { + String checksumJson = Files.readString(PathUtils.get(getClass().getResource(TIKA_CHECKSUMS).toURI())); + Map checksums = XContentHelper.convertToMap(JsonXContent.jsonXContent, checksumJson, false); + DirectoryStream stream = Files.newDirectoryStream(unzipToTemp(TIKA_FILES)); - try (DirectoryStream stream = Files.newDirectoryStream(tmp)) { - for (Path doc : stream) { - logger.debug("parsing: {}", doc); - assertParseable(doc); + for (Path doc : stream) { + String parsedContent = tryParse(doc); + assertNotNull(parsedContent); + assertFalse(parsedContent.isEmpty()); + + String check = checksums.get(doc.getFileName().toString()).toString(); + if (!check.isEmpty()) { + assertEquals(check, DigestUtils.sha1Hex(parsedContent)); } } + + stream.close(); } - void assertParseable(Path fileName) throws Exception { - try { - byte bytes[] = Files.readAllBytes(fileName); - String parsedContent = TikaImpl.parse(bytes, new Metadata(), -1); - assertNotNull(parsedContent); - assertFalse(parsedContent.isEmpty()); - logger.debug("extracted content: {}", parsedContent); - } catch (Exception e) { - throw new RuntimeException("parsing of filename: " + fileName.getFileName() + " failed", e); + private Path unzipToTemp(String zipDir) throws Exception { + Path tmp = createTempDir(); + DirectoryStream stream = Files.newDirectoryStream(PathUtils.get(getClass().getResource(zipDir).toURI())); + + for (Path doc : stream) { + String filename = doc.getFileName().toString(); + TestUtil.unzip(getClass().getResourceAsStream(zipDir + filename), tmp); } + + stream.close(); + return tmp; + } + + private String tryParse(Path doc) 
throws Exception { + byte bytes[] = Files.readAllBytes(doc); + return TikaImpl.parse(bytes, new Metadata(), -1); } } diff --git a/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/.checksums b/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/.checksums new file mode 100644 index 0000000000000..227d7d833a231 --- /dev/null +++ b/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/.checksums @@ -0,0 +1,209 @@ +{ + "testWORD_tabular_symbol.doc": "c708d7ef841f7e1748436b8ef5670d0b2de1a227", + "testWORD_1img.docx": "367e2ade13ca3c19bcd8a323e21d51d407e017ac", + "testMasterFooter.odp": "bcc59df70699c739423a50e362c722b81ae76498", + "testTXTNonASCIIUTF8.txt": "1ef514431ca8d838f11e99f8e4a0637730b77aa0", + "EmbeddedOutlook.docx": "c544a6765c19ba11b0bf3edb55c79e1bd8565c6e", + "testWORD_override_list_numbering.docx": "4e892319b921322916225def763f451e4bbb4e16", + "testTextBoxes.key": "b01581d5bd2483ce649a1a1406136359f4b93167", + "testPPT_masterText.pptx": "9fee8337b76dc3e196f4554dcde22b9dd1c3b3e8", + "testComment.docx": "333b9009686f27265b4729e8172b3e62048ec7ec", + "testRTFInvalidUnicode.rtf": "32b3e3d8e5c5a1b66cb15fc964b9341bea7048f4", + "testEXCEL_headers_footers.xlsx": "9e8d2a700fc431fe29030e86e08162fc8ecf2c1a", + "testWORD6.doc": "1479de589755c7212815445799c44dab69d4587c", + "testPagesHeadersFootersFootnotes.pages": "99d434be7de4902dc70700aa9c2a31624583c1f1", + "testPDF_no_extract_yes_accessibility_owner_empty.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "testOpenOffice2.odt": "564b3e1999a53073a04142e01b663757a6e7fb08", + "testTables.key": "250cff75db7fc3c8b95b2cbd3f37308826e0c93d", + "testDOCX_Thumbnail.docx": "fce6a43271bc242e2bb8341afa659ed166e08050", + "testWORD_3imgs.docx": "292ca6fa41d32b462e66061e89adb19423721975", + "testPDF_acroform3.pdf": "dcf6588cb5e41701b168606ea6bfbadecdcd3bc9", + "testWORD_missing_ooxml_bean1.docx": "c3058f2513fecc0a6d76d3ecf55676f236b085ff", + "testPDFTwoTextBoxes.pdf": "4adf324ce030076b1755fdb3a6cce676ee325ae4", + "testRTFUnicodeGothic.rtf": "f9932470ff686b0c217ea94ed5d4f2fd85f7998e", + "headers.mbox": "75ec25789fe870b6d25365e4ea73d731fc274847", + "testPPT_embeded.ppt": "", + "testXML3.xml": "804d4812408eb324ae8483d2140b648ec871dd2a", + "testOptionalHyphen.doc": "10f9ca38cc2985e94967aa2c454bfe40aff76976", + "testComment.doc": "66e57653d5d08478556ca640408b172b65855cc7", + "testEXCEL_headers_footers.xls": "18977c66fc8bcb8c44de3063b69b65a3de9c3f25", + "testWORD_embedded_rtf.doc": "cc2d289acfe3d1068a2649b7fa0c06c50bb6ceda", + "testEXCEL_custom_props.xlsx": "6b72ae08362a204b37dbba0a30b4134ae3e7918f", + "testOptionalHyphen.docx": "5b8ffc0df1691a8fed7d63aa9b256e9e02e36d71", + "testPPT_various.pptx": "d149de9af8071141a6ba6e2cd4ef5f6d9431a826", + "testWORD_closingSmartQInHyperLink.doc": "9859f378c603b70bf0d44a281169ae5b16a21878", + "test_embedded_zip.pptx": "d19406edcec09440d066877c451ceba60abc3483", + "testRTFUmlautSpaces.rtf": "155b39879c5b5fbad22fd650be37ae7f91489eb2", + "protectedFile.xlsx": "ee08eeaf05c35c960243f831c3a974d9ee07aa28", + "Doc1_ole.doc": "fb63220506ab666f1fe87b0608e1447fd4fd3489", + "testEXCEL_embeded.xlsx": "", + "EmbeddedDocument.docx": "", + "testODFwithOOo3.odt": "3815d6fb7f5829db882ea8ebd664f252711e6e60", + "testPagesHeadersFootersRomanUpper.pages": "85b3cd545ba6c33e5d44b844a6afea8cb6eaec0b", + "testPPT_comment.ppt": "88fd667fd0292785395a8d0d229304aa91110556", + "testPPT_2imgs.pptx": "66eda11ad472918153100dad8ee5be0f1f8e2e04", + 
"testPagesHeadersFootersAlphaUpper.pages": "56bef0d1eaedfd7599aae29031d2eeb0e3fe4688", + "testWORD_text_box.docx": "e01f7b05c6aac3449b9a699c3e4d2e62ff3368a3", + "testWORD_missing_text.docx": "3814332884a090b6d1020bff58d0531486710c45", + "testComment.pdf": "60e181061a00454c2e622bd37a9878234c13231d", + "testPDF_no_extract_no_accessibility_owner_empty.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "test_embedded_package.rtf": "cd90adb3f777e68aa0288fd23e8f4fbce260a763", + "testPDF_bom.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "testOptionalHyphen.ppt": "7e016e42860bd408054bb8653fef39b2756119d9", + "testHTML_utf8.html": "3ba828044754772e4c9df5f9a2213beaa75842ef", + "testPPT_comment.pptx": "25fab588194dabd5902fd2ef880ee9542d036776", + "testRTFWithCurlyBraces.rtf": "019cab63b73ff89d094823cf50c0a721bec08ee2", + "testFooter.ods": "846e1d0415b23fa27631b536b0cf566abbf8fcc1", + "testPPT.ppt": "933ee556884b1d9e28b801daa0d77bbaa4f4be62", + "testEXCEL-formats.xls": "", + "testPPT_masterFooter.pptx": "29bb97006b3608b7db6ff72b94d20157878d94dd", + "testWORD_header_hyperlink.doc": "914bbec0730c54948ad307ea3e375ef0c100abf1", + "testRTFHyperlink.rtf": "2b2ffb1997aa495fbab1af490d134051de168c97", + "testExtraSpaces.pdf": "b5575400309b01c1050a927d8d1ecf8761062abc", + "testRTFWindowsCodepage1250.rtf": "7ba418843f401634f97d21c844c2c4093b7194fb", + "testRTFTableCellSeparation2.rtf": "62782ca40ff0ed6c3ba90f8055ee724b44af203f", + "testPagesHeadersFootersRomanLower.pages": "2410fc803907001eb39c201ad4184b243e271c6d", + "headerPic.docx": "c704bb648feac7975dff1024a5f762325be7cbc2", + "testHTMLNoisyMetaEncoding_4.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testRTFBoldItalic.rtf": "0475d224078682cf3f9f3f4cbc14a63456c5a0d8", + "test-outlook.msg": "1f202fc11a873e305d5b4d4607409f3f734065ec", + "testRTFVarious.rtf": "bf6ea9cf57886e680c5e6743a66a12b950a09083", + "testXHTML.html": "c6da900f81c1c550518e65d579d3dd62dd7c5c0c", + "EmbeddedPDF.docx": "454476bdf4a968189a6f53e75c146382bf58a434", + "testXML.xml": "e1615e9b31be58f7af9ad963e5a112efa5cdaffa", + "testWORD_no_format.docx": "9a3f5d8a4c8c0f077cc615bcfc554dc87d5926aa", + "testPPT_masterText.ppt": "f5ff5e2d45ccb180cf371ed99b7dfeb2a93539b3", + "testPDF_PDFEncodedStringInXMP.pdf": "78fd59d394f72d28a9908739fa562099978dafa1", + "testPPT_custom_props.pptx": "72152d28afbc23a50cc71fa37d1dce9ef03ca72d", + "testRTFListOverride.rtf": "f8c61d8a66afdaa07f3740e859497818bfc2ca01", + "testEXCEL_1img.xls": "", + "testWORD_1img.doc": "0826d299a7770e93603f5667d89dccb7b74d904c", + "testNPEOpenDocument.odt": "4210b973c80084c58463ec637fa43e911f77d6fe", + "testRTFWord2010CzechCharacters.rtf": "9443011aac32434240ab8dbff360c970fc1c7074", + "testPDF_Version.8.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPPT.ppsx": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "testPPT_autodate.pptx": "50467dbb37d1c74b8b37fe93eddf6f9e87d21bf3", + "testWordArt.pptx": "3566bbee790704b3654fe78319957f9e0cddb6d9", + "NullHeader.docx": "18430c968ba29173b52610efdaa723424b3c4d79", + "testRTFWordPadCzechCharacters.rtf": "5dbb58452a3507c384008662f8fce90063f12189", + "resume.html": "fbfb9d8264f6eebd79847fe7a7f1b81edd4a027d", + "testPagesLayout.pages": "5db1ab91c93e6183d0af8513f62c7b87964704af", + "testOptionalHyphen.pptx": "c2977eefe7d2cad8c671f550d7883185ec65591b", + "testWORD_numbered_list.docx": "07194c58165993468e66bc4eba4f5bd89d5bee09", + "testEXCEL_1img.xlsx": "", + "testPDFTripleLangTitle.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "protect.xlsx": 
"ee08eeaf05c35c960243f831c3a974d9ee07aa28", + "testWORD_bold_character_runs2.docx": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "testXLSX_Thumbnail.xlsx": "020bf155ae157661c11727c54e6694cf9cd2c0d3", + "testWORD_embedded_pdf.docx": "d8adb797aaaac92afd8dd9b499bd197347f15688", + "testOptionalHyphen.rtf": "2f77b61bab5b4502b4ddd5018b454be157091d07", + "testEXCEL-charts.xls": "", + "testWORD_override_list_numbering.doc": "60e47a3e71ba08af20af96131d61740a1f0bafa3", + "testPDF_twoAuthors.pdf": "c5f0296cc21f9ae99ceb649b561c55f99d7d9452", + "testPDF_Version.10.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testHTMLNoisyMetaEncoding_2.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testFooter.odt": "cd5d0fcbcf48d6f005d087c47d00e84f39bcc321", + "testPPT.pptm": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "testPPT_various.ppt": "399e27a9893284f106dc44f15b5e636454db681e", + "testRTFListMicrosoftWord.rtf": "0303eb3e2f30530621a7a407847b759a3b21467e", + "testWORD_bold_character_runs2.doc": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "boilerplate-whitespace.html": "a9372bc75d7d84cbcbb0bce68fcaed73ad8ef52c", + "testEXCEL_95.xls": "20d9b9b0f3aecd28607516b4b837c8bab3524b6c", + "testPPT_embedded_two_slides.pptx": "", + "testPDF_bookmarks.pdf": "5fc486c443511452db4f1aa6530714c6aa49c831", + "test_recursive_embedded.docx": "afc32b07ce07ad273e5b3d1a43390a9d2b6dd0a9", + "testEXCEL-formats.xlsx": "", + "testPPT_masterText2.pptx": "2b01eab5d0349e3cfe791b28c70c2dbf4efc884d", + "test.doc": "774be3106edbb6d80be36dbb548d62401dcfa0fe", + "test_recursive_embedded_npe.docx": "afc32b07ce07ad273e5b3d1a43390a9d2b6dd0a9", + "testPPT_embedded2.ppt": "80e106b3fc68107e7f9579cff04e3b15bdfc557a", + "testWORD_custom_props.docx": "e7a737a5237a6aa9c6b3fc677eb8fa65c30d6dfe", + "testPDF_Version.4.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testBinControlWord.rtf": "ef858fbb7584ea7f92ffed8d0a08c1cc35ffee07", + "testWORD_null_style.docx": "0be9dcfb83423c78a06af514ec21e4e7770ec48e", + "test-outlook2003.msg": "bb3c35eb7e95d657d7977c1d3d52862734f9f329", + "testPDFVarious.pdf": "c66bbbacb10dd27430f7d0bed9518e75793cedae", + "testHTMLNoisyMetaEncoding_3.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testRTFCorruptListOverride.rtf": "116a782d02a7f25010a15cbbb189bf98e6b89855", + "testEXCEL_custom_props.xls": "b5584d9b13ab1566ce539238dc75e7eb3449ba7f", + "testPDF_Version.7.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPDFEmbeddingAndEmbedded.docx": "e7b648adb15cd16cdd84437c2b9524a8eeb213e4", + "testHTMLNoisyMetaEncoding_1.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testWORD_3imgs.doc": "818aa8c6c44dd78c49100c3c38e95abdf3812981", + "testRTFEmbeddedLink.rtf": "2720ffb5ff3a6bbb2c5c1cb43fb4922362ed788a", + "testKeynote.key": "11387b59fc6339bb73653fcbb26d387521b98ec9", + "testPDF.pdf": "5a377554685367764eaf73d093408ace323fcec7", + "protectedSheets.xlsx": "", + "testWORD.doc": "cdd41377e699287cbbe17fbb1498cfe5814dde23", + "testComment.xlsx": "d4be580bb97c1c90be379281179c7932b37a18c0", + "testPDFPackage.pdf": "75d6fa216b4e2880a65ced55d17ca2b599d2606c", + "testWORD_embeded.doc": "", + "testHTML.html": "6548b16c5ea33e907577615ce60ca4876a3936ef", + "testEXCEL_5.xls": "a174f098333c659d331317641d4d1d9d83055288", + "pictures.ppt": "95bbfdbf2f60f74371285c337d3445d0acd59a9b", + "testPPT_masterText2.ppt": "f5ff5e2d45ccb180cf371ed99b7dfeb2a93539b3", + "testPDF-custommetadata.pdf": "a84b914655db55574e6002b6f37209ecd4c3d462", + "testWORD_embeded.docx": "", + "testStyles.odt": 
"c25dd05633e3aab7132d2f5608126e2b4b03848f", + "testPDF_multiFormatEmbFiles.pdf": "2103b2c30b44d5bb3aa790ab04a6741a10ea235a", + "testXML2.xml": "a8c85a327716fad93faa4eb0f993057597d6f471", + "testPagesComments.pages": "cbb45131cf45b9c454e754a07af3ae927b1a69cc", + "testEXCEL_4.xls": "8d5e6156222151faaccb079d46ddb5393dd25771", + "testWORD_no_format.doc": "88feaf03fe58ee5cc667916c6a54cbd5d605cc1c", + "testPages.pages": "288e6db2f39604e372a2095257509c78dba22cbb", + "footnotes.docx": "33b01b73a12f9e14efbcc340890b11ee332dca8e", + "testWORD_bold_character_runs.doc": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "testWORD_custom_props.doc": "e7a737a5237a6aa9c6b3fc677eb8fa65c30d6dfe", + "testPDF_Version.11.x.PDFA-1b.pdf": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "testAnnotations.pdf": "5f599e7916198540e1b52c3e472a525f50fd45f6", + "tika434.html": "7d74122631f52f003a48018cc376026ccd8d984e", + "testPagesHeadersFootersAlphaLower.pages": "fc1d766908134ff4689fa63fa3e91c3e9b08d975", + "testRTFRegularImages.rtf": "756b1db45cb05357ceaf9c8efcf0b76e3913e190", + "testRTFUmlautSpaces2.rtf": "1fcd029357062241d74d789e93477c101ff24e3f", + "testWORD_numbered_list.doc": "e06656dd9b79ac970f3cd065fa8b630a4981556f", + "testPPT_autodate.ppt": "05b93967ea0248ad263b2f24586e125df353fd3d", + "testBulletPoints.key": "92242d67c3dbc1b22aac3f98e47061d09e7719f9", + "testMasterSlideTable.key": "1d61e2fa3c3f3615500c7f72f62971391b9e9a2f", + "testWORD_various.doc": "8cbdf1a4e0d78471eb90403612c4e92866acf0cb", + "testEXCEL_textbox.xlsx": "1e81121e91e58a74d838e414ae0fc0055a4b4100", + "big-preamble.html": "a9d759b46b6c6c1857d0d89c3a75ee2f3ace70c9", + "testWORD.docx": "f72140bef19475e950e56084d1ab1cb926697b19", + "testComment.rtf": "f6351d0f1f20c4ee0fff70adca6abbc6e638610e", + "testRTFUnicodeUCNControlWordCharacterDoubling.rtf": "3e6f2f38682e38ffc96a476ca51bec2291a27fa7", + "testPDF_Version.5.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPPTX_Thumbnail.pptx": "6aa019154289317c7b7832fe46556e6d61cd0a9f", + "testRTFTableCellSeparation.rtf": "5647290a3197c1855fad10201dc7be60ea7b0e42", + "testRTFControls.rtf": "aee6afb80e8b09cf49f056020c037f70c2757e49", + "testEXCEL.xls": "", + "testRTFJapanese.rtf": "08976f9a7d6d3a155cad84d7fa23295cb972a17a", + "testPageNumber.pdf": "96b03d2cc6782eba653af28228045964e68422b5", + "testOptionalHyphen.pdf": "12edd450ea76ea4e79f80ebd3442999ec2180dbc", + "testPDFFileEmbInAnnotation.pdf": "97a6e5781bbaa6aea040546d797c4916f9d90c86", + "testFontAfterBufferedText.rtf": "d1c8757b3ed91f2d7795234405c43005868affa3", + "testPPT_masterFooter.ppt": "8c9104385820c2631ddda20814231808fac03d4d", + "testWORD_various.docx": "189df989e80afb09281901aefc458c6630a8530b", + "testComment.ppt": "21842dd9cb8a7d4af0f102543c192861c9789705", + "testPopupAnnotation.pdf": "1717b1d16c0a4b9ff5790cac90fc8e0fba170a35", + "testWORD_bold_character_runs.docx": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "testOverlappingText.pdf": "726da7d6c184512ed8d44af2a5085d65523c4572", + "testRTF.rtf": "91e830ceba556741116c9e83b0c69a0d6c5c9304", + "testRTFIgnoredControlWord.rtf": "1eb6a2f2fd32b1bb4227c0c02a35cb6027d9ec8c", + "testComment.xls": "4de962f16452159ce302fc4a412b06a06cf9a0f6", + "testPPT.ppsm": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "boilerplate.html": "b3558f02c3179e4aeeb6057594d87bda79964e7b", + "testEXCEL_embeded.xls": "", + "testEXCEL.xlsx": "", + "testPPT_2imgs.ppt": "9a68072ffcf171389e78cf8bc018c4b568a6202d", + "testComment.pptx": "6ae6052f469b8f901fd4fd8bc70f8e267255a58e", + "testPDF_Version.6.x.pdf": 
"03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPPT.pptx": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "testPPT_custom_props.ppt": "edf196acc12701accc7be5dfe63e053436db45e6", + "testPPT_embeded.pptx": "", + "testRTFListLibreOffice.rtf": "4c38d9e2f0a8c9a4c2cc8d2a52db9591ab759abe", + "testPDF_Version.9.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testRTFHexEscapeInsideWord.rtf": "6cffda07e774c55b5465d8134a0bdcb8c30f3386", + "testRTFNewlines.rtf": "2375ca14e2b0d8f7ff6bbda5191544b3ee7c09fb", + "testRTF-ms932.rtf": "5f9db1b83bf8e9c4c6abb065adaeb151307d33f2", + "test_TIKA-1251.doc": "5a9394c34274964055fdd9272b4f7dc314b99ecf", + "test_list_override.rtf": "9fe8b4a36c5222fe7ed2e9b54e2330aec8fa9423" +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java index 273b69e483e8c..2f353f2a53329 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java @@ -21,6 +21,7 @@ import org.opensearch.plugin.insights.settings.QueryInsightsSettings; import org.opensearch.plugins.ActionPlugin; import org.opensearch.rest.RestHandler; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.ScalingExecutorBuilder; @@ -50,8 +51,7 @@ public void setup() { clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - clusterService = new ClusterService(settings, clusterSettings, threadPool); - + clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool); } public void testGetSettings() { diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java index f340950017a5c..328ed0cd2ed15 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java @@ -22,6 +22,7 @@ import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -57,7 +58,7 @@ public void setup() { clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - clusterService = new ClusterService(settings, clusterSettings, null); + clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); } diff --git 
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java
index a5f36b6e8cce0..d05cf7b6a636f 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java
@@ -17,6 +17,7 @@
 import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse;
 import org.opensearch.plugin.insights.rules.model.MetricType;
 import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
@@ -33,7 +34,7 @@ public class TransportTopQueriesActionTests extends OpenSearchTestCase {
     private final Settings.Builder settingsBuilder = Settings.builder();
     private final Settings settings = settingsBuilder.build();
     private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-    private final ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool);
+    private final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool);
     private final TransportService transportService = mock(TransportService.class);
     private final QueryInsightsService topQueriesByLatencyService = mock(QueryInsightsService.class);
     private final ActionFilters actionFilters = mock(ActionFilters.class);
diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 66bf7ed6ecce8..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4ceead1b7ae4f --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 deleted file mode 100644 index c0e4bb0c56849..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7cf1ac1b60301 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 deleted file mode 100644
index 9dea3dfc55691..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fbe3c274a39cef5538ca8688ac7e2ad0053a6ffa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..3915ab2616beb --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 @@ -0,0 +1 @@ +e6a168dba62aa63743b9e2b83f4e0f0dfdc143d3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 deleted file mode 100644 index fe8e51b8e0869..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fab507bba9d477e52ed2302dc3ddbd23cbae339 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..db26ebbf738f7 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0969b0c3cb8c75d759e9a6c585c44c9b9f3a4f75 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 3954ac9c39af3..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e07032ce170277213ac4835169ca79fa0340c7b5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..bb8ecfe34d295 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +f77e7bf0e64dfcf53bfdcf2764ad7ab92b78a4da \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b13a709f1c449..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c4ca8f15e85c5 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +381c5bf8b7570c163fa7893a26d02b7ac36ff6eb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5caf947d87a1b..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f4f0c0dd54c578af2c613a0db7172bf7dca9c79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..9f6e95ba38d2e --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +4d54c8d5b95b14756043efb59b8c3e62ec67aa43 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 deleted file mode 100644 index e0f52ab04ea84..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a77224107f586a7f9e3dc5d12fc0d4d8f0c04803 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..f31396d94c2ec --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b7fb401dd47c79e6b99f2319ac3b561c50c31c30 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b42cdc2835eb0..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..18d122acd2c44 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3e687cdc4ecdbbad07508a11b715bdf95fa20939 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 deleted file mode 100644 index 3d631bc904f24..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..cbcbfcd87d682 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 @@ -0,0 +1 @@ +639e2c63ade6f2a49d7e501ca2264b74d240b448 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 deleted file mode 100644 index 9ceef6959744b..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..1eeedfc0926f5 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 @@ -0,0 +1 @@ +b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index e019a878dfcf0..eb50bd2d0615a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,10 +67,10 @@ dependencies { api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.11.3' - api 'com.google.code.gson:gson:2.10.1' + api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.7.0' + api 'commons-cli:commons-cli:1.8.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 deleted file mode 100644 index 759bc9275d346..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6504b3f17e8bc5adc6b6c8deecc90144d0154075 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 new file mode 100644 index 0000000000000..65102052409ea --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 @@ -0,0 +1 @@ +41a4bff12057eecb6daaf9c7f36c237815be3da1 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/gson-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.10.1.jar.sha1 deleted file mode 100644 index 9810309d1013a..0000000000000 --- a/plugins/repository-hdfs/licenses/gson-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 
@@ -b3add478d4382b78ea20b1671390a858002feb6c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 new file mode 100644 index 0000000000000..0414a49526895 --- /dev/null +++ b/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 @@ -0,0 +1 @@ +527175ca6d81050b53bdd4c457a6d6e017626b0e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 deleted file mode 100644 index a874755cc29da..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ba1acc8ff088334f2ac5556663f8b737eb8b571 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..8f8d86e6065b2 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +db3f4d3ad3d16e26991a64d50b749ae09e0e0c8e \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 560d12d14395d..00decbe4fa9cd 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -577,11 +577,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', ) } diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 deleted file mode 100644 index 66bf7ed6ecce8..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4ceead1b7ae4f --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 @@ -0,0 +1 @@ +fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 deleted file mode 100644 index c0e4bb0c56849..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 
b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7cf1ac1b60301 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 @@ -0,0 +1 @@ +0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 
5172500557f8b..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 deleted file mode 100644 index 83fc39246ef0a..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7307c8acbc9b331fce3496750a5112bdc726fd2a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..408f3aa5d1339 --- 
/dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3ca1cff0bf82bfd38e89f6946e54f24cbb3424a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file
diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java
index 21184380d54a9..c5438d58e679d 100644
--- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -73,7 +73,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.stream.StreamSupport;

 import fixture.s3.S3HttpHandler;
@@ -165,7 +164,6 @@ protected Settings nodeSettings(int nodeOrdinal) {
         return builder.build();
     }

-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10735")
     @Override
     public void testRequestStats() throws Exception {
         final String repository = createRepository(randomName());
@@ -207,7 +205,12 @@ public void testRequestStats() throws Exception {
             } catch (RepositoryMissingException e) {
                 return null;
             }
-        }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get();
+        }).filter(b -> {
+            if (b instanceof BlobStoreRepository) {
+                return ((BlobStoreRepository) b).blobStore() != null;
+            }
+            return false;
+        }).map(Repository::stats).reduce(RepositoryStats::merge).get();

         Map<BlobStore.Metric, Map<String, Long>> extendedStats = repositoryStats.extendedStats;
         Map<String, Long> aggregatedStats = new HashMap<>();
@@ -249,6 +252,8 @@ protected S3Repository createRepository(
         ClusterService clusterService,
         RecoverySettings recoverySettings
     ) {
+        GenericStatsMetricPublisher genericStatsMetricPublisher = new GenericStatsMetricPublisher(10000L, 10, 10000L, 10);
+
         return new S3Repository(
             metadata,
             registry,
@@ -263,7 +268,7 @@ protected S3Repository createRepository(
             false,
             null,
             null,
-            null
+            genericStatsMetricPublisher
         ) {

             @Override
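The rewritten filter above keeps only repositories whose blob store has actually been initialized before merging their stats, rather than filtering out nulls after the fact. A condensed sketch of that aggregation step; `StatsMergeSketch` is hypothetical and not part of the patch, and the `List<Repository>` parameter stands in for the per-node repository lookup the test performs.

```java
import java.util.List;

import org.opensearch.repositories.Repository;
import org.opensearch.repositories.RepositoryStats;
import org.opensearch.repositories.blobstore.BlobStoreRepository;

// Sketch: merge request stats across repositories whose blob store is
// initialized (blobStore() != null), mirroring the patched filter above.
final class StatsMergeSketch {
    static RepositoryStats mergedStats(List<Repository> repositories) {
        return repositories.stream()
            .filter(r -> r instanceof BlobStoreRepository && ((BlobStoreRepository) r).blobStore() != null)
            .map(Repository::stats)
            .reduce(RepositoryStats::merge)
            .orElseThrow(() -> new AssertionError("no repository produced stats"));
    }
}
```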
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index acf0c5e83a17b..b489a3cc85037 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -78,7 +78,7 @@
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStoreException;
 import org.opensearch.common.blobstore.DeleteResult;
-import org.opensearch.common.blobstore.FetchBlobResult;
+import org.opensearch.common.blobstore.InputStreamWithMetadata;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
@@ -143,9 +143,9 @@ public boolean blobExists(String blobName) {

     @ExperimentalApi
     @Override
-    public FetchBlobResult readBlobWithMetadata(String blobName) throws IOException {
+    public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException {
         S3RetryingInputStream s3RetryingInputStream = new S3RetryingInputStream(blobStore, buildKey(blobName));
-        return new FetchBlobResult(s3RetryingInputStream, s3RetryingInputStream.getMetadata());
+        return new InputStreamWithMetadata(s3RetryingInputStream, s3RetryingInputStream.getMetadata());
     }

     @Override
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
index de815f9202f44..f688be9216b8f 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
@@ -244,6 +244,11 @@ public Map<Metric, Map<String, Long>> extendedStats() {
         return extendedStats;
     }

+    @Override
+    public boolean isBlobMetadataEnabled() {
+        return true;
+    }
+
     public ObjectCannedACL getCannedACL() {
         return cannedACL;
     }
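`FetchBlobResult` is renamed to `InputStreamWithMetadata`, and `S3BlobStore` now advertises metadata support via `isBlobMetadataEnabled()`. A hypothetical consumer sketch follows; the `getInputStream()` and `getMetadata()` accessors are assumptions inferred from the wrapper's constructor arguments in the hunk above, not something this patch confirms.

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import org.opensearch.common.blobstore.BlobContainer;
import org.opensearch.common.blobstore.BlobStore;
import org.opensearch.common.blobstore.InputStreamWithMetadata;

// Hypothetical consumer (not part of the patch). Accessor names getInputStream()
// and getMetadata() are assumed from the constructor shown above.
final class BlobMetadataReadSketch {
    static void printMetadata(BlobStore store, BlobContainer container, String blobName) throws IOException {
        if (store.isBlobMetadataEnabled()) { // override added for S3 in this patch
            InputStreamWithMetadata blob = container.readBlobWithMetadata(blobName);
            try (InputStream in = blob.getInputStream()) {
                Map<String, String> metadata = blob.getMetadata();
                System.out.println(blobName + " metadata: " + metadata + ", first byte: " + in.read());
            }
        }
    }
}
```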
diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 8c0ee8ba718ac..ee557aa0efc79 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -173,11 +173,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git
a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 7d7eb330b4a55..1a94def3fdff1 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -249,11 +249,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 
1bccee872152d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b13a709f1c449..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c4ca8f15e85c5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +381c5bf8b7570c163fa7893a26d02b7ac36ff6eb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at 
end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b42cdc2835eb0..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..18d122acd2c44 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3e687cdc4ecdbbad07508a11b715bdf95fa20939 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 
100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 deleted file mode 100644 index 3d631bc904f24..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..cbcbfcd87d682 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 @@ -0,0 +1 @@ +639e2c63ade6f2a49d7e501ca2264b74d240b448 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 deleted file mode 100644 index 9ceef6959744b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..1eeedfc0926f5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 @@ -0,0 +1 @@ +b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07 \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-1.3.17.md b/release-notes/opensearch.release-notes-1.3.17.md new file mode 100644 index 0000000000000..5218b9e3be20c --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.17.md @@ -0,0 +1,6 @@ +## 2024-05-30 Version 1.3.17 Release Notes + +### Upgrades +- OpenJDK Update (April 2024 Patch releases), update to Eclipse Temurin 11.0.23+9 ([#13406](https://github.com/opensearch-project/OpenSearch/pull/13406)) +- Upgrade BouncyCastle dependencies from 1.75 to 1.78.1 resolving [CVE-2024-30172], [CVE-2024-30171] and [CVE-2024-29857] +- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802)) diff --git 
a/release-notes/opensearch.release-notes-2.14.0.md b/release-notes/opensearch.release-notes-2.14.0.md index 8ef0215baa67a..c5fc3e895c45d 100644 --- a/release-notes/opensearch.release-notes-2.14.0.md +++ b/release-notes/opensearch.release-notes-2.14.0.md @@ -84,4 +84,5 @@ - Improve the error messages for _stats with closed indices ([#13012](https://github.com/opensearch-project/OpenSearch/pull/13012)) - Ignore BaseRestHandler unconsumed content check as it's always consumed. ([#13290](https://github.com/opensearch-project/OpenSearch/pull/13290)) - Fix mapper_parsing_exception when using flat_object fields with names longer than 11 characters ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13259)) -- DATETIME_FORMATTER_CACHING_SETTING experimental feature should not default to 'true' ([#13532](https://github.com/opensearch-project/OpenSearch/pull/13532)) \ No newline at end of file +- DATETIME_FORMATTER_CACHING_SETTING experimental feature should not default to 'true' ([#13532](https://github.com/opensearch-project/OpenSearch/pull/13532)) +- Fix negative RequestStats metric issue ([#13553](https://github.com/opensearch-project/OpenSearch/pull/13553)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/50_multi_match.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/50_multi_match.yml new file mode 100644 index 0000000000000..34acb5985b555 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/50_multi_match.yml @@ -0,0 +1,35 @@ +"Cross fields do not return negative scores": + - skip: + version: " - 2.99.99" + reason: "This fix is in 2.15. Until we do the BWC dance, we need to skip all pre-3.0, though." + - do: + index: + index: test + id: 1 + body: { "color" : "orange red yellow" } + - do: + index: + index: test + id: 2 + body: { "color": "orange red purple", "shape": "red square" } + - do: + index: + index: test + id: 3 + body: { "color" : "orange red yellow purple" } + - do: + indices.refresh: { } + - do: + search: + index: test + body: + query: + multi_match: + query: "red" + type: "cross_fields" + fields: [ "color", "shape^100"] + tie_breaker: 0.1 + explain: true + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "2" } + - gt: { hits.hits.2._score: 0.0 } diff --git a/server/build.gradle b/server/build.gradle index 9714f13ec67d6..15301e68fca3d 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,7 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') - id('me.champeau.gradle.japicmp') version '0.4.2' + id('me.champeau.gradle.japicmp') version '0.4.3' } publishing { diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..08339fa8a4ce1 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +9cc4e600289bf1171b47de74536bd34c476f85a8 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index f229c373aa1af..0000000000000 --- a/server/licenses/lucene-analysis-common-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd0afb5da5dcb4c7498bd1ee7f7bab0e289404b8 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 
b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..3dce8a2162edd --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +8babfe85be7e36c893741e08072c11e71db09715 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index edaf28a7f6e76..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -88888315cd60e565960ae2e6fed2af0df077a2a2 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..943a9b2fd214b --- /dev/null +++ b/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +3c2361bd633374ae3814b175cc25ccf773f67026 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 88309bc46411a..0000000000000 --- a/server/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53a828e3e88f55c83979cd3df0704617cc9edb9a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..8587c3ed5e82a --- /dev/null +++ b/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +d9f29b49cd1e0a061ff7fa4a53e8605bd49bd3d0 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 13f1276e3b033..0000000000000 --- a/server/licenses/lucene-grouping-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d1f54a816c9d85e890a862a2dffdc734ece2770c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..25579432a9cbd --- /dev/null +++ b/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +33bc26d46d62bb1cf3bf725db637226a43db7625 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 6cef51ac4453f..0000000000000 --- a/server/licenses/lucene-highlighter-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e7c9336fa86fb866fcd76ea5d6283c804b4d580 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..1bfef89965e67 --- /dev/null +++ b/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +82966698abdb8f0367a162f642560566a6085dc8 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 2524ac35c2afe..0000000000000 --- a/server/licenses/lucene-join-9.11.0-snapshot-fb97840.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -17be4fc1f9feca0dac84a37d54dca4b32df4c619 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..73adf3fcb2829 --- /dev/null +++ b/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +746f392e7ec27a7cd6ca2add7dd8441d2a6085da \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index f5ef377300839..0000000000000 --- a/server/licenses/lucene-memory-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7350675a2cf386c0f003b667b61db614f03bb984 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..7f7dfead4c329 --- /dev/null +++ b/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +0eb06ecc39c0ec0db380a6e5aad1b16907e0bfd9 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index e94fcf0f259a1..0000000000000 --- a/server/licenses/lucene-misc-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5c8bd120d5985ab6bd4e5f89efe08c226c0a323 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..e3d400003efd8 --- /dev/null +++ b/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +0e56eb18cceffcd5ce2e47b679e873420254df74 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index fc80394195fa9..0000000000000 --- a/server/licenses/lucene-queries-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2e1975ac26e9172722f734bf0f5583317e5eb16a \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..8e8c7f5171107 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +dee3997a72eeae905e92930f53e724b6bef279da \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 10ef577bc1bdc..0000000000000 --- a/server/licenses/lucene-queryparser-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8cf3d5dd4d0538b38e4e88bb865bc59d835d887 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..2d1df051e30b4 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +946bc45b87b3d770ab6828b0d0a5f8684f2c3624 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-fb97840.jar.sha1 
b/server/licenses/lucene-sandbox-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 08a61ba30bc0d..0000000000000 --- a/server/licenses/lucene-sandbox-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5a3a7a138ff4978f3ddb186d9786e6cb4793b291 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..0f9b7c0e90218 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +d73667f61fb5e7fde4cec52fcfbbfd9847068aec \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index a244219c1de60..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc71e0125c66d29a1bffc1ddeab4b96526e737c8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..87894603e0d84 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +a8e8ab80bfb6abd70932e50fe31e13ecf2e00987 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index d2b3821bbf5f6..0000000000000 --- a/server/licenses/lucene-spatial3d-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aef4c04d353092a438eee302521fe34188b7c4df \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 new file mode 100644 index 0000000000000..6100f6fe0d585 --- /dev/null +++ b/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 @@ -0,0 +1 @@ +45d6f0facd45d4e49585f0dabfa62ed5a1883033 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-fb97840.jar.sha1 deleted file mode 100644 index 2c147e4651a44..0000000000000 --- a/server/licenses/lucene-suggest-9.11.0-snapshot-fb97840.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -86f68cacd85f99b4ddcda3aff7c873349ba59381 \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.15.jar.sha1 b/server/licenses/reactor-core-3.5.15.jar.sha1 deleted file mode 100644 index 02df47ed58b9d..0000000000000 --- a/server/licenses/reactor-core-3.5.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e07a24c671235a2a806e75e9b8ff23d7d1db3d4 \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.17.jar.sha1 b/server/licenses/reactor-core-3.5.17.jar.sha1 new file mode 100644 index 0000000000000..6663356bab047 --- /dev/null +++ b/server/licenses/reactor-core-3.5.17.jar.sha1 @@ -0,0 +1 @@ +2cf9b080e3a2d8a5a39948260db5fd1dae54c3ac \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index 4be049c9a9109..a1122f279c7e4 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -52,6 +52,7 @@ import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.concurrent.ExecutionException; @@ -60,6 +61,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase { @Override @@ -139,6 +141,7 @@ public void testCreateCloneIndex() { } public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException { + asyncUploadMockFsRepo = false; Version version = VersionUtils.randomIndexCompatibleVersion(random()); int numPrimaryShards = 1; prepareCreate("source").setSettings( diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java index 282eb9c6ad95e..cd19a0ee1ff77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -48,7 +48,9 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; +import org.junit.Before; import java.util.Arrays; import java.util.Map; @@ -61,12 +63,18 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase { @Override protected boolean forbidPrivateIndexSettings() { return false; } + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) @@ -84,6 +92,7 @@ public void testCreateShrinkIndexToN() { int[] shardSplits = randomFrom(possibleShardSplits); assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); for (int i = 0; i < 20; i++) { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java index dd4252d24f314..dc3c8793a93f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -67,6 +67,7 @@ 
import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -86,6 +87,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 8d7b71ac7bb83..7620738c9a30d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -17,6 +17,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -39,6 +40,11 @@ public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { private static String INDEX_NAME = "test-index"; + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index de7a52761c77c..0539f96e429c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -12,6 +12,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Client; @@ -20,7 +21,7 @@ import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.service.NodeCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStats; -import org.opensearch.common.cache.stats.ImmutableCacheStatsHolderTests; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; @@ -56,6 +57,10 @@ public static Collection parameters() { return Arrays.asList(new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }); } + /** + * Test aggregating by indices, indices+shards, shards, or no levels, and check the resulting stats + * are as we expect. 
+ */ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { String index1Name = "index1"; String index2Name = "index2"; @@ -73,84 +78,60 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { searchIndex(client, index2Name, ""); // First, aggregate by indices only - Map xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); + ImmutableCacheStatsHolder indicesStats = getNodeCacheStatsResult(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); - List index1Keys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index1Name); + List index1Dimensions = List.of(index1Name); // Since we searched twice, we expect to see 1 hit, 1 miss and 1 entry for index 1 ImmutableCacheStats expectedStats = new ImmutableCacheStats(1, 1, 0, 0, 1); - checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, false, true); + checkCacheStatsAPIResponse(indicesStats, index1Dimensions, expectedStats, false, true); // Get the request size for one request, so we can reuse it for next index - int requestSize = (int) ((Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( - xContentMap, - index1Keys - )).get(ImmutableCacheStats.Fields.SIZE_IN_BYTES); + long requestSize = indicesStats.getStatsForDimensionValues(List.of(index1Name)).getSizeInBytes(); assertTrue(requestSize > 0); - List index2Keys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index2Name); + List index2Dimensions = List.of(index2Name); // We searched once in index 2, we expect 1 miss + 1 entry expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); + checkCacheStatsAPIResponse(indicesStats, index2Dimensions, expectedStats, true, true); // The total stats for the node should be 1 hit, 2 misses, and 2 entries expectedStats = new ImmutableCacheStats(1, 2, 0, 2 * requestSize, 2); - List totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue()); - checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true, true); + List totalStatsKeys = List.of(); + checkCacheStatsAPIResponse(indicesStats, totalStatsKeys, expectedStats, true, true); // Aggregate by shards only - xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); + ImmutableCacheStatsHolder shardsStats = getNodeCacheStatsResult(client, List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); - List index1Shard0Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index1Name + "][0]" - ); + List index1Shard0Dimensions = List.of("[" + index1Name + "][0]"); expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true, true); + checkCacheStatsAPIResponse(shardsStats, index1Shard0Dimensions, expectedStats, true, true); - List index2Shard0Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index2Name + "][0]" - ); + List index2Shard0Dimensions = List.of("[" + index2Name + "][0]"); expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Shard0Keys, expectedStats, true, true); + checkCacheStatsAPIResponse(shardsStats, index2Shard0Dimensions, expectedStats, true, true); // Aggregate by indices 
and shards - xContentMap = getNodeCacheStatsXContentMap( + ImmutableCacheStatsHolder indicesAndShardsStats = getNodeCacheStatsResult( client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, IndicesRequestCache.SHARD_ID_DIMENSION_NAME) ); - index1Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.INDEX_DIMENSION_NAME, - index1Name, - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index1Name + "][0]" - ); + index1Dimensions = List.of(index1Name, "[" + index1Name + "][0]"); expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true, true); - - index2Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.INDEX_DIMENSION_NAME, - index2Name, - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index2Name + "][0]" - ); + checkCacheStatsAPIResponse(indicesAndShardsStats, index1Dimensions, expectedStats, true, true); + index2Dimensions = List.of(index2Name, "[" + index2Name + "][0]"); expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); - + checkCacheStatsAPIResponse(indicesAndShardsStats, index2Dimensions, expectedStats, true, true); } - // TODO: Add testCacheStatsAPIWithTieredCache when TSC stats implementation PR is merged - + /** + * Check the new stats API returns the same values as the old stats API. In particular, + * check that the new and old APIs are both correctly estimating memory size, + * using the logic that includes the overhead memory in ICacheKey. + */ public void testStatsMatchOldApi() throws Exception { - // The main purpose of this test is to check that the new and old APIs are both correctly estimating memory size, - // using the logic that includes the overhead memory in ICacheKey. String index = "index"; Client client = client(); startIndex(client, index); @@ -173,8 +154,7 @@ public void testStatsMatchOldApi() throws Exception { .getRequestCache(); assertNotEquals(0, oldApiStats.getMemorySizeInBytes()); - List xContentMapKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue()); - Map xContentMap = getNodeCacheStatsXContentMap(client, List.of()); + ImmutableCacheStatsHolder statsHolder = getNodeCacheStatsResult(client, List.of()); ImmutableCacheStats expected = new ImmutableCacheStats( oldApiStats.getHitCount(), oldApiStats.getMissCount(), @@ -183,9 +163,13 @@ public void testStatsMatchOldApi() throws Exception { 0 ); // Don't check entries, as the old API doesn't track this - checkCacheStatsAPIResponse(xContentMap, xContentMapKeys, expected, true, false); + checkCacheStatsAPIResponse(statsHolder, List.of(), expected, true, false); } + /** + * Test the XContent in the response behaves correctly when we pass null levels. + * Only the total cache stats should be returned. + */ public void testNullLevels() throws Exception { String index = "index"; Client client = client(); @@ -194,9 +178,81 @@ public void testNullLevels() throws Exception { for (int i = 0; i < numKeys; i++) { searchIndex(client, index, String.valueOf(i)); } - Map xContentMap = getNodeCacheStatsXContentMap(client, null); + Map xContentMap = getStatsXContent(getNodeCacheStatsResult(client, null)); // Null levels should result in only the total cache stats being returned -> 6 fields inside the response. 
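// Editor's note (hedged sketch, not part of the patch): the refactor in these hunks swaps
// nested-XContent-map lookups for typed accessors on ImmutableCacheStatsHolder. Using only
// calls that appear in the hunks above, a stats lookup now reads like this; "index1" is
// illustrative and the surrounding test-class context (client, imports) is assumed:
ImmutableCacheStatsHolder stats = getNodeCacheStatsResult(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME));
// Drill into one dimension path instead of walking nested maps by string keys:
ImmutableCacheStats forIndex1 = stats.getStatsForDimensionValues(List.of("index1"));
long hits = forIndex1.getHits(); // replaces (int) map.get(ImmutableCacheStats.Fields.HIT_COUNT)
long sizeInBytes = forIndex1.getSizeInBytes();
// An empty dimension path aggregates to the node-level totals:
ImmutableCacheStats nodeTotals = stats.getStatsForDimensionValues(List.of());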
- assertEquals(6, ((Map) xContentMap.get("request_cache")).size()); + assertEquals(6, xContentMap.size()); + } + + /** + * Test clearing the cache using API sets memory size and number of items to 0, but leaves other stats + * unaffected. + */ + public void testCacheClear() throws Exception { + String index = "index"; + Client client = client(); + + startIndex(client, index); + + int expectedHits = 2; + int expectedMisses = 7; + // Search for the same doc to give hits + for (int i = 0; i < expectedHits + 1; i++) { + searchIndex(client, index, ""); + } + // Search for new docs + for (int i = 0; i < expectedMisses - 1; i++) { + searchIndex(client, index, String.valueOf(i)); + } + + ImmutableCacheStats expectedTotal = new ImmutableCacheStats(expectedHits, expectedMisses, 0, 0, expectedMisses); + ImmutableCacheStatsHolder statsHolder = getNodeCacheStatsResult(client, List.of()); + // Don't check the memory size, just assert it's nonzero + checkCacheStatsAPIResponse(statsHolder, List.of(), expectedTotal, false, true); + long originalMemorySize = statsHolder.getTotalSizeInBytes(); + assertNotEquals(0, originalMemorySize); + + // Clear cache + ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(index); + client.admin().indices().clearCache(clearIndicesCacheRequest).actionGet(); + + // Now size and items should be 0 + expectedTotal = new ImmutableCacheStats(expectedHits, expectedMisses, 0, 0, 0); + statsHolder = getNodeCacheStatsResult(client, List.of()); + checkCacheStatsAPIResponse(statsHolder, List.of(), expectedTotal, true, true); + } + + /** + * Test the cache stats responses are in the expected place in XContent when we call the overall API + * GET /_nodes/stats. They should be at nodes.[node_id].caches.request_cache. + */ + public void testNodesStatsResponse() throws Exception { + String index = "index"; + Client client = client(); + + startIndex(client, index); + + NodesStatsResponse nodeStatsResponse = client.admin() + .cluster() + .prepareNodesStats("data:true") + .all() // This mimics /_nodes/stats + .get(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + Map paramMap = new HashMap<>(); + ToXContent.Params params = new ToXContent.MapParams(paramMap); + + builder.startObject(); + nodeStatsResponse.toXContent(builder, params); + builder.endObject(); + Map xContentMap = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), builder.toString(), true); + // Values should be at nodes.[node_id].caches.request_cache + // Get the node id + Map nodesResponse = (Map) xContentMap.get("nodes"); + assertEquals(1, nodesResponse.size()); + String nodeId = nodesResponse.keySet().toArray(String[]::new)[0]; + Map cachesResponse = (Map) ((Map) nodesResponse.get(nodeId)).get("caches"); + assertNotNull(cachesResponse); + // Request cache should be present in the response + assertTrue(cachesResponse.containsKey("request_cache")); } private void startIndex(Client client, String indexName) throws InterruptedException { @@ -227,8 +283,7 @@ private SearchResponse searchIndex(Client client, String index, String searchSuf return resp; } - private static Map getNodeCacheStatsXContentMap(Client client, List aggregationLevels) throws IOException { - + private static ImmutableCacheStatsHolder getNodeCacheStatsResult(Client client, List aggregationLevels) throws IOException { CommonStatsFlags statsFlags = new CommonStatsFlags(); statsFlags.includeAllCacheTypes(); String[] flagsLevels; @@ -248,16 +303,16 @@ private static Map getNodeCacheStatsXContentMap(Client client, L 
// Can always get the first data node as there's only one in this test suite assertEquals(1, nodeStatsResponse.getNodes().size()); NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); + return ncs.getStatsByCache(CacheType.INDICES_REQUEST_CACHE); + } + private static Map getStatsXContent(ImmutableCacheStatsHolder statsHolder) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); Map paramMap = new HashMap<>(); - if (aggregationLevels != null && !aggregationLevels.isEmpty()) { - paramMap.put("level", String.join(",", aggregationLevels)); - } ToXContent.Params params = new ToXContent.MapParams(paramMap); builder.startObject(); - ncs.toXContent(builder, params); + statsHolder.toXContent(builder, params); builder.endObject(); String resultString = builder.toString(); @@ -265,27 +320,22 @@ private static Map getNodeCacheStatsXContentMap(Client client, L } private static void checkCacheStatsAPIResponse( - Map xContentMap, - List xContentMapKeys, + ImmutableCacheStatsHolder statsHolder, + List dimensionValues, ImmutableCacheStats expectedStats, boolean checkMemorySize, boolean checkEntries ) { - // Assumes the keys point to a level whose keys are the field values ("size_in_bytes", "evictions", etc) and whose values store - // those stats - Map aggregatedStatsResponse = (Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( - xContentMap, - xContentMapKeys - ); + ImmutableCacheStats aggregatedStatsResponse = statsHolder.getStatsForDimensionValues(dimensionValues); assertNotNull(aggregatedStatsResponse); - assertEquals(expectedStats.getHits(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.HIT_COUNT)); - assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MISS_COUNT)); - assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.EVICTIONS)); + assertEquals(expectedStats.getHits(), (int) aggregatedStatsResponse.getHits()); + assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.getMisses()); + assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.getEvictions()); if (checkMemorySize) { - assertEquals(expectedStats.getSizeInBytes(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.SIZE_IN_BYTES)); + assertEquals(expectedStats.getSizeInBytes(), (int) aggregatedStatsResponse.getSizeInBytes()); } if (checkEntries) { - assertEquals(expectedStats.getItems(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ITEM_COUNT)); + assertEquals(expectedStats.getItems(), (int) aggregatedStatsResponse.getItems()); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index ae2295cb874f5..766ca2c1189e5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -1077,24 +1077,6 @@ public void testDynamicStalenessThresholdUpdate() throws Exception { // staleness threshold dynamic updates should throw exceptions on invalid input public void testInvalidStalenessThresholdUpdateThrowsException() throws Exception { - int cacheCleanIntervalInMillis = 1; - String node = internalCluster().startNode( - Settings.builder() - .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90) - .put( - 
IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, - TimeValue.timeValueMillis(cacheCleanIntervalInMillis) - ) - ); - Client client = client(node); - String index1 = "index1"; - setupIndex(client, index1); - - // create first cache entry in index1 - createCacheEntry(client, index1, "hello"); - assertCacheState(client, index1, 0, 1); - assertTrue(getRequestCacheStats(client, index1).getMemorySizeInBytes() > 0); - // Update indices.requests.cache.cleanup.staleness_threshold to "10%" with illegal argument assertThrows("Ratio should be in [0-1.0]", IllegalArgumentException.class, () -> { ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); @@ -1103,15 +1085,6 @@ public void testInvalidStalenessThresholdUpdateThrowsException() throws Exceptio ); client().admin().cluster().updateSettings(updateSettingsRequest).actionGet(); }); - - // everything else should continue to work fine later on. - // force refresh so that it creates 1 stale key - flushAndRefresh(index1); - // sleep until cache cleaner would have cleaned up the stale key from index 2 - assertBusy(() -> { - // cache cleaner should NOT have cleaned from index 1 - assertEquals(0, getRequestCacheStats(client, index1).getMemorySizeInBytes()); - }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); } // closing the Index after caching will clean up from Indices Request Cache diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 611dfc2756b29..0493bcf800c97 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicLong; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; @@ -78,10 +79,11 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) .put(extraSettings) .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); } else { logger.info("Adding docrep node"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java index 45679598dc551..c72b6851c1125 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java @@ -15,14 +15,21 @@ import 
org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.settings.Settings; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteIndexPath; +import org.opensearch.index.remote.RemoteIndexPathUploader; +import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import java.nio.file.Path; +import java.util.Arrays; import java.util.List; import java.util.function.Function; import java.util.stream.Collectors; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -454,6 +461,85 @@ public void testRemotePathMetadataAddedWithFirstPrimaryMovingToRemote() throws E assertRemoteProperties(indexName); } + /** + * Scenario: + * creates an index on docrep node with non-remote cluster-manager. + * make the cluster mixed, add remote cluster-manager and data nodes. + *
<p>
+ * exclude docrep nodes, assert that remote index path file exists + * when shards start relocating to the remote nodes. + */ + public void testRemoteIndexPathFileExistsAfterMigration() throws Exception { + String docrepClusterManager = internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 2 docrep nodes"); + addRemote = false; + internalCluster().startDataOnlyNodes(2, Settings.builder().put("node.attr._type", "docrep").build()); + internalCluster().validateClusterFormed(); + + logger.info("---> Creating index with 1 primary and 1 replica"); + String indexName = "migration-index"; + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + createIndexAndAssertDocrepProperties(indexName, oneReplica); + + String indexUUID = internalCluster().client() + .admin() + .indices() + .prepareGetSettings(indexName) + .get() + .getSetting(indexName, IndexMetadata.SETTING_INDEX_UUID); + + logger.info("---> Starting indexing in parallel"); + AsyncIndexingService indexingService = new AsyncIndexingService(indexName); + indexingService.startIndexing(); + + logger.info("---> Adding 2 remote enabled nodes to the cluster & cluster manager"); + initDocRepToRemoteMigration(); + addRemote = true; + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2, Settings.builder().put("node.attr._type", "remote").build()); + internalCluster().validateClusterFormed(); + + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.HASHED_PREFIX) + ) + .get() + .isAcknowledged() + ); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(docrepClusterManager)); + internalCluster().validateClusterFormed(); + + logger.info("---> Excluding docrep nodes from allocation"); + excludeNodeSet("type", "docrep"); + + waitForRelocation(); + waitNoPendingTasksOnAll(); + indexingService.stopIndexing(); + + // validate remote index path file exists + logger.info("---> Asserting remote index path file exists"); + String fileNamePrefix = String.join(RemoteIndexPathUploader.DELIMITER, indexUUID, "7", RemoteIndexPath.DEFAULT_VERSION); + + assertTrue(FileSystemUtils.exists(translogRepoPath.resolve(RemoteIndexPath.DIR))); + Path[] files = FileSystemUtils.files(translogRepoPath.resolve(RemoteIndexPath.DIR)); + assertEquals(1, files.length); + assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); + + assertTrue(FileSystemUtils.exists(segmentRepoPath.resolve(RemoteIndexPath.DIR))); + files = FileSystemUtils.files(segmentRepoPath.resolve(RemoteIndexPath.DIR)); + assertEquals(1, files.length); + assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); + } + private void createIndexAndAssertDocrepProperties(String index, Settings settings) { createIndexAssertHealthAndDocrepProperties(index, settings, this::ensureGreen); } @@ -512,5 +598,6 @@ private void assertCustomIndexMetadata(String index) { logger.info("---> Asserting custom index metadata"); IndexMetadata iMd = internalCluster().client().admin().cluster().prepareState().get().getState().metadata().index(index); assertNotNull(iMd.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + 
assertNotNull(iMd.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY).get(IndexMetadata.TRANSLOG_METADATA_KEY)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index d29dacb001434..280fd13f0fdcf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -11,15 +11,18 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; -import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; @@ -39,7 +42,7 @@ public Settings indexSettings(int shards, int replicas) { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } protected void restore(String... 
indices) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java index e14a4062f7775..6b94e638a6876 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -30,7 +30,6 @@ import org.junit.Before; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; @@ -50,7 +49,7 @@ public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Before diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 501e762ea24e4..64efcee6ef1b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -36,6 +36,9 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.remotestore.translogmetadata.mocks.MockFsMetadataSupportedRepositoryPlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.ReloadableFsRepository; @@ -48,6 +51,7 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -55,6 +59,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -74,6 +79,8 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected Path segmentRepoPath; protected Path translogRepoPath; protected boolean clusterSettingsSuppliedByTest = false; + protected boolean asyncUploadMockFsRepo = randomBoolean(); + private boolean metadataSupportedType = randomBoolean(); private final List documentKeys = List.of( randomAlphaOfLength(5), randomAlphaOfLength(5), @@ -129,6 +136,19 @@ protected Map indexData(int numberOfIterations, boolean invokeFlus return indexingStats; } + @Override + protected Collection> nodePlugins() { + if (!clusterSettingsSuppliedByTest && asyncUploadMockFsRepo) { + if (metadataSupportedType) { + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsMetadataSupportedRepositoryPlugin.class)) + .collect(Collectors.toList()); + } else { + return 
Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); + } + } + return super.nodePlugins(); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { if (segmentRepoPath == null || translogRepoPath == null) { @@ -138,10 +158,27 @@ protected Settings nodeSettings(int nodeOrdinal) { if (clusterSettingsSuppliedByTest) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); } else { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) - .build(); + if (asyncUploadMockFsRepo) { + String repoType = metadataSupportedType ? MockFsMetadataSupportedRepositoryPlugin.TYPE_MD : MockFsRepositoryPlugin.TYPE; + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + repoType, + REPOSITORY_2_NAME, + translogRepoPath, + repoType + ) + ) + .build(); + } else { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .build(); + } } } @@ -221,6 +258,8 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFiel @After public void teardown() { clusterSettingsSuppliedByTest = false; + asyncUploadMockFsRepo = randomBoolean(); + metadataSupportedType = randomBoolean(); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 34d223f1dd14f..a4ed908e0e362 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -39,6 +39,7 @@ import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.file.Files; @@ -69,6 +70,11 @@ public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { static final Setting MOCK_SETTING = Setting.simpleString("mock-setting"); static final String[] EXCLUDED_NODES = { "ex-1", "ex-2" }; + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java index 0bcde4b44c734..d957dda1ba04f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java @@ -19,11 +19,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; 
+import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -37,7 +38,7 @@ public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index ca0ae3ca9a700..7721b18a4fe6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -38,7 +38,6 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; -import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -48,7 +47,6 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -56,6 +54,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -81,7 +80,7 @@ public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, MockFsRepositoryPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Override @@ -797,25 +796,8 @@ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionExcep // Test local only translog files which are not uploaded to remote store (no metadata present in remote) // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { - clusterSettingsSuppliedByTest = true; - - // Overriding settings to use AsyncMultiStreamBlobContainer - Settings settings = Settings.builder() - .put(super.nodeSettings(1)) - .put( - remoteStoreClusterSettings( - REPOSITORY_NAME, - segmentRepoPath, - MockFsRepositoryPlugin.TYPE, - REPOSITORY_2_NAME, - translogRepoPath, - MockFsRepositoryPlugin.TYPE - ) - ) - .build(); - - internalCluster().startClusterManagerOnlyNode(settings); - String dataNode = internalCluster().startDataOnlyNode(settings); + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNode(); // 1. 
Create index with 0 replica createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index 65016c4976157..7ae08bf968ade 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -26,7 +26,6 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; -import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index ef2dcf3217df6..b0827dcfe4892 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -25,11 +25,11 @@ import org.opensearch.test.transport.MockTransportService; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; @@ -38,7 +38,7 @@ public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTes @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } public void testSingleNodeClusterRepositoryRegistration() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 4a0af206b9d89..31c73e2fc03ae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -54,7 +54,7 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } public void setup() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java index 9b30dacfced13..44c02dbb6d611 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java @@ -41,6 +41,7 @@ protected Settings nodeSettings(int nodeOrdinal) { * wherever not required. */ public void testRemoteIndexPathFileCreation() throws ExecutionException, InterruptedException, IOException { + asyncUploadMockFsRepo = false; String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNodes(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java new file mode 100644 index 0000000000000..109a884ff6c5d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.InputStreamWithMetadata; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.remotestore.multipart.mocks.MockFsAsyncBlobContainer; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +public class MockFsMetadataSupportedBlobContainer extends MockFsAsyncBlobContainer { + + private static final String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; + + public MockFsMetadataSupportedBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { + super(blobStore, blobPath, path, triggerDataIntegrityFailure); + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException { + // If the upload writeContext has non-null metadata, we store the metadata content as a translog.ckp file. + if (writeContext.getMetadata() != null) { + String base64String = writeContext.getMetadata().get(CHECKPOINT_FILE_DATA_KEY); + byte[] decodedBytes = Base64.getDecoder().decode(base64String); + ByteArrayInputStream inputStream = new ByteArrayInputStream(decodedBytes); + int length = decodedBytes.length; + String ckpFileName = getCheckpointFileName(writeContext.getFileName()); + writeBlob(ckpFileName, inputStream, length, true); + } + super.asyncBlobUpload(writeContext, completionListener); + } + + // This is a utility to get the translog.ckp file name for a given translog.tlog file.
+ private String getCheckpointFileName(String translogFileName) { + if (!translogFileName.endsWith(".tlog")) { + throw new IllegalArgumentException("Invalid translog file name format: " + translogFileName); + } + + int dotIndex = translogFileName.lastIndexOf('.'); + String baseName = translogFileName.substring(0, dotIndex); + return baseName + ".ckp"; + } + + public static String convertToBase64(InputStream inputStream) throws IOException { + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + byte[] buffer = new byte[128]; + int bytesRead; + int totalBytesRead = 0; + + while ((bytesRead = inputStream.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + totalBytesRead += bytesRead; + if (totalBytesRead > 1024) { + // We enforce a limit of 1KB on the size of the checkpoint file. + throw new AssertionError("Input stream exceeds 1KB limit"); + } + } + + byte[] bytes = byteArrayOutputStream.toByteArray(); + return Base64.getEncoder().encodeToString(bytes); + } + } + + // During a readBlobWithMetadata call, we separately download the translog.ckp file and return it as metadata. + @Override + public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { + String ckpFileName = getCheckpointFileName(blobName); + InputStream inputStream = readBlob(blobName); + try (InputStream ckpInputStream = readBlob(ckpFileName)) { + String ckpString = convertToBase64(ckpInputStream); + Map<String, String> metadata = new HashMap<>(); + metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpString); + return new InputStreamWithMetadata(inputStream, metadata); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java new file mode 100644 index 0000000000000..89dd91c8222ac --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
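The container above moves the translog checkpoint through the upload's metadata map instead of as a separate blob upload, Base64-encoding it under the "ckp-data" key. A minimal round-trip sketch of that convention, using a hypothetical payload, showing the encode/decode is lossless:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

public class CkpMetadataRoundTrip {
    public static void main(String[] args) {
        // Hypothetical checkpoint payload; the real content is the binary translog.ckp file.
        byte[] ckpBytes = "checkpoint-contents".getBytes(StandardCharsets.UTF_8);
        // Write side: the checkpoint travels Base64-encoded under the "ckp-data" key of the upload metadata.
        Map<String, String> metadata = new HashMap<>();
        metadata.put("ckp-data", Base64.getEncoder().encodeToString(ckpBytes));
        // Read side: asyncBlobUpload decodes the value back into the bytes written as translog.ckp.
        byte[] roundTripped = Base64.getDecoder().decode(metadata.get("ckp-data"));
        if (!Arrays.equals(ckpBytes, roundTripped)) {
            throw new AssertionError("Base64 round trip must be lossless");
        }
        System.out.println("round trip ok: " + roundTripped.length + " bytes");
    }
}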
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class MockFsMetadataSupportedBlobStore extends FsBlobStore { + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedBlobStore(int bufferSizeInBytes, Path path, boolean readonly, boolean triggerDataIntegrityFailure) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new MockFsMetadataSupportedBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + + // Make MockFs metadata supported + @Override + public boolean isBlobMetadataEnabled() { + return true; + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java new file mode 100644 index 0000000000000..333fba413ce4e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
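The store above advertises metadata support through isBlobMetadataEnabled(). A plausible caller-side sketch of how such a capability flag would gate the metadata-aware read path (assumed usage with simplified stand-in interfaces, not code from this diff):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MetadataCapabilityGate {
    // Simplified stand-ins for BlobStore/BlobContainer; the real interfaces carry more methods.
    interface Store { boolean isBlobMetadataEnabled(); }
    interface Container {
        InputStream readBlob(String name) throws IOException;
        InputStream readBlobWithMetadata(String name) throws IOException; // real API returns InputStreamWithMetadata
    }

    // Assumed caller-side pattern: consult the capability flag before taking the
    // metadata-aware read path, falling back to a plain read otherwise.
    static InputStream read(Store store, Container container, String blobName) throws IOException {
        return store.isBlobMetadataEnabled() ? container.readBlobWithMetadata(blobName) : container.readBlob(blobName);
    }

    public static void main(String[] args) throws IOException {
        Store store = () -> true;
        Container container = new Container() {
            public InputStream readBlob(String name) { return new ByteArrayInputStream(new byte[0]); }
            public InputStream readBlobWithMetadata(String name) { return new ByteArrayInputStream(new byte[0]); }
        };
        System.out.println(read(store, container, "translog-1.tlog").available() + " bytes");
    }
}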
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.fs.FsRepository; + +public class MockFsMetadataSupportedRepository extends FsRepository { + + public static Setting TRIGGER_DATA_INTEGRITY_FAILURE = Setting.boolSetting( + "mock_fs_repository.trigger_data_integrity_failure", + false + ); + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + triggerDataIntegrityFailure = TRIGGER_DATA_INTEGRITY_FAILURE.get(metadata.settings()); + } + + @Override + protected BlobStore createBlobStore() throws Exception { + FsBlobStore fsBlobStore = (FsBlobStore) super.createBlobStore(); + return new MockFsMetadataSupportedBlobStore( + fsBlobStore.bufferSizeInBytes(), + fsBlobStore.path(), + isReadOnly(), + triggerDataIntegrityFailure + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java new file mode 100644 index 0000000000000..71ae652a6b23d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
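Taken together, the mock classes compose as a small decorator chain: the repository builds the blob store, and the blob store hands out metadata-aware containers. A compressed sketch of that wiring, reduced to stand-in types:

public class MockWiringSketch {
    // Stand-ins for BlobContainer / FsBlobStore / FsRepository, reduced to the wiring itself.
    static class Container {
        final boolean metadataAware;
        Container(boolean metadataAware) { this.metadataAware = metadataAware; }
    }

    static class Store {
        boolean isBlobMetadataEnabled() { return true; }                              // mirrors MockFsMetadataSupportedBlobStore
        Container blobContainer() { return new Container(isBlobMetadataEnabled()); }  // mirrors the blobContainer(BlobPath) override
    }

    static class Repository {
        Store createBlobStore() { return new Store(); }                               // mirrors the createBlobStore() override above
    }

    public static void main(String[] args) {
        Container container = new Repository().createBlobStore().blobContainer();
        System.out.println("metadata-aware container: " + container.metadataAware);
    }
}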
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.RepositoryPlugin; +import org.opensearch.repositories.Repository; + +import java.util.Collections; +import java.util.Map; + +public class MockFsMetadataSupportedRepositoryPlugin extends Plugin implements RepositoryPlugin { + + public static final String TYPE_MD = "fs_metadata_supported_repository"; + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + "fs_metadata_supported_repository", + metadata -> new MockFsMetadataSupportedRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index a58db51780826..01ad06757640c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -1914,14 +1914,8 @@ public void testRangeQueryWithTimeZone() throws Exception { * Test range with a custom locale, e.g. "de" in this case. Documents here mention the day of week * as "Mi" for "Mittwoch (Wednesday" and "Do" for "Donnerstag (Thursday)" and the month in the query * as "Dez" for "Dezember (December)". - * Note: this test currently needs the JVM arg `-Djava.locale.providers=SPI,COMPAT` to be set. - * When running with gradle this is done implicitly through the BuildPlugin, but when running from - * an IDE this might need to be set manually in the run configuration. See also CONTRIBUTING.md section - * on "Configuring IDEs And Running Tests". */ public void testRangeQueryWithLocaleMapping() throws Exception { - assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set"; - assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1938,17 +1932,21 @@ public void testRangeQueryWithLocaleMapping() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"), - client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800") + client().prepareIndex("test").setId("1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"), + client().prepareIndex("test").setId("2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800") ); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800")) + .setQuery( + QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Do., 07 Dez. 2000 00:00:00 -0800") + ) .get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800")) + .setQuery( + QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 
2000 02:55:00 -0800").lte("Fr., 08 Dez. 2000 00:00:00 -0800") + ) .get(); assertHitCount(searchResponse, 2L); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 90bb2b501764e..b41dd99ff6d40 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -47,6 +47,7 @@ import org.opensearch.node.Node; import org.opensearch.repositories.fs.FsRepository; import org.hamcrest.MatcherAssert; +import org.junit.After; import java.io.IOException; import java.nio.file.Files; @@ -62,6 +63,10 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; +import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; +import static org.opensearch.test.NodeRoles.dataNode; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -939,6 +944,52 @@ public void testRelocateSearchableSnapshotIndex() throws Exception { assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); } + public void testCreateSearchableSnapshotWithSpecifiedRemoteDataRatio() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName1 = "test-idx-1"; + final String restoredIndexName1 = indexName1 + "-copy"; + final String indexName2 = "test-idx-2"; + final String restoredIndexName2 = indexName2 + "-copy"; + final int numReplicasIndex1 = 1; + final int numReplicasIndex2 = 1; + + Settings clusterManagerNodeSettings = clusterManagerOnlyNode(); + internalCluster().startNodes(2, clusterManagerNodeSettings); + Settings dataNodeSettings = dataNode(); + internalCluster().startNodes(2, dataNodeSettings); + createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, indexName1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex2, 100, indexName2); + + final Client client = client(); + assertAcked( + client.admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5)) + ); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); + + internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + + assertDocCount(restoredIndexName1, 100L); + assertDocCount(restoredIndexName2, 100L); + assertIndexDirectoryDoesNotExist(restoredIndexName1, restoredIndexName2); + } + + @After + public void cleanup() throws Exception { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey())) + ); + } + private void assertSearchableSnapshotIndexDirectoryExistence(String nodeName, Index index, boolean exists) throws Exception { final Node node = internalCluster().getInstance(Node.class, nodeName); final ShardId shardId = new ShardId(index, 0); diff --git
a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index b47b974b96fed..34e1e210d7137 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -120,6 +120,7 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader } int max = 0; long minSumTTF = Long.MAX_VALUE; + int[] docCounts = new int[contexts.length]; for (int i = 0; i < contexts.length; i++) { TermStates ctx = contexts[i]; int df = ctx.docFreq(); @@ -133,6 +134,7 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader // we need to find out the minimum sumTTF to adjust the statistics // otherwise the statistics don't match minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field())); + docCounts[i] = reader.getDocCount(terms[i].field()); } } if (maxDoc > minSumTTF) { @@ -175,7 +177,11 @@ protected int compare(int i, int j) { if (prev > current) { actualDf++; } - contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf)); + // Per field, we want to guarantee that the adjusted df does not exceed the number of docs with the field. + // That is, in the IDF formula (log(1 + (N - n + 0.5) / (n + 0.5))), we need to make sure that n (the + // adjusted df) is never bigger than N (the number of docs with the field). + int fieldMaxDoc = Math.min(maxDoc, docCounts[i]); + contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(fieldMaxDoc, actualDf)); prev = current; sumTTF += ctx.totalTermFreq(); } diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java index d1748d7f80995..22d5146f5bd4f 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java @@ -51,6 +51,8 @@ class CustomFieldHighlighter extends FieldHighlighter { private static final Passage[] EMPTY_PASSAGE = new Passage[0]; + private static final Comparator DEFAULT_PASSAGE_SORT_COMPARATOR = Comparator.comparingInt(Passage::getStartOffset); + private final Locale breakIteratorLocale; private final int noMatchSize; private String fieldValue; @@ -66,7 +68,16 @@ class CustomFieldHighlighter extends FieldHighlighter { PassageFormatter passageFormatter, int noMatchSize ) { - super(field, fieldOffsetStrategy, breakIterator, passageScorer, maxPassages, maxNoHighlightPassages, passageFormatter); + super( + field, + fieldOffsetStrategy, + breakIterator, + passageScorer, + maxPassages, + maxNoHighlightPassages, + passageFormatter, + DEFAULT_PASSAGE_SORT_COMPARATOR + ); this.breakIteratorLocale = breakIteratorLocale; this.noMatchSize = noMatchSize; } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 9265c6ae60678..09cceca52ce23 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -50,7 +50,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; 
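The docCounts clamp added to BlendedTermQuery exists because the BM25 IDF term, log(1 + (N - n + 0.5) / (n + 0.5)), turns negative once the blended document frequency n exceeds N, the number of documents containing the field. A small numeric check of that boundary, with hypothetical values:

public class IdfClampCheck {
    // BM25 IDF as referenced in the comment above: log(1 + (N - n + 0.5) / (n + 0.5))
    static double idf(long docFreq, long docCount) {
        return Math.log(1 + (docCount - docFreq + 0.5d) / (docFreq + 0.5d));
    }

    public static void main(String[] args) {
        long fieldDocCount = 100; // N: documents that actually contain the field
        long blendedDf = 150;     // n: doc freq blended in from a more common field
        System.out.println(idf(blendedDf, fieldDocCount));                          // ~ -0.40: a negative score contribution
        System.out.println(idf(Math.min(blendedDf, fieldDocCount), fieldDocCount)); // ~ 0.005: clamped, small but non-negative
    }
}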
import org.opensearch.core.index.Index; -import org.opensearch.index.IndexModule; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -59,8 +58,6 @@ import java.util.Set; import java.util.stream.Stream; -import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; - /** * Transport action for updating index settings * @@ -133,9 +130,7 @@ protected ClusterBlockException checkBlock(UpdateSettingsRequest request, Cluste for (Index index : requestIndices) { if (state.blocks().indexBlocked(ClusterBlockLevel.METADATA_WRITE, index.getName())) { allowSearchableSnapshotSettingsUpdate = allowSearchableSnapshotSettingsUpdate - && IndexModule.Type.REMOTE_SNAPSHOT.match( - state.getMetadata().getIndexSafe(index).getSettings().get(INDEX_STORE_TYPE_SETTING.getKey()) - ); + && state.getMetadata().getIndexSafe(index).isRemoteSnapshot(); } } // check if all settings in the request are in the allow list diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 4d108f8d78a69..ca2685e093d3f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -138,7 +138,7 @@ public CommonStatsFlags all() { includeUnloadedSegments = false; includeAllShardIndexingPressureTrackers = false; includeOnlyTopIndexingPressureMetrics = false; - includeCaches = EnumSet.noneOf(CacheType.class); + includeCaches = EnumSet.allOf(CacheType.class); levels = new String[0]; return this; } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java b/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java new file mode 100644 index 0000000000000..d48f82a388245 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster; + +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.Objects; +import java.util.Optional; + +/** + * Class containing metrics (counters/latency) specific to ClusterManager. 
+ * + * @opensearch.internal + */ +public final class ClusterManagerMetrics { + + private static final String LATENCY_METRIC_UNIT_MS = "ms"; + + public final Histogram clusterStateAppliersHistogram; + public final Histogram clusterStateListenersHistogram; + public final Histogram rerouteHistogram; + public final Histogram clusterStateComputeHistogram; + public final Histogram clusterStatePublishHistogram; + + public ClusterManagerMetrics(MetricsRegistry metricsRegistry) { + clusterStateAppliersHistogram = metricsRegistry.createHistogram( + "cluster.state.appliers.latency", + "Histogram for tracking the latency of cluster state appliers", + LATENCY_METRIC_UNIT_MS + ); + clusterStateListenersHistogram = metricsRegistry.createHistogram( + "cluster.state.listeners.latency", + "Histogram for tracking the latency of cluster state listeners", + LATENCY_METRIC_UNIT_MS + ); + rerouteHistogram = metricsRegistry.createHistogram( + "allocation.reroute.latency", + "Histogram for recording latency of shard re-routing", + LATENCY_METRIC_UNIT_MS + ); + clusterStateComputeHistogram = metricsRegistry.createHistogram( + "cluster.state.new.compute.latency", + "Histogram for recording time taken to compute new cluster state", + LATENCY_METRIC_UNIT_MS + ); + clusterStatePublishHistogram = metricsRegistry.createHistogram( + "cluster.state.publish.success.latency", + "Histogram for recording time taken to publish a new cluster state", + LATENCY_METRIC_UNIT_MS + ); + } + + public void recordLatency(Histogram histogram, Double value) { + histogram.record(value); + } + + public void recordLatency(Histogram histogram, Double value, Optional tags) { + if (Objects.isNull(tags) || tags.isEmpty()) { + histogram.record(value); + return; + } + histogram.record(value, tags.get()); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 1e389da479f44..d8821bb721caa 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -146,7 +146,8 @@ public ClusterModule( List clusterPlugins, ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, - ThreadContext threadContext + ThreadContext threadContext, + ClusterManagerMetrics clusterManagerMetrics ) { this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); @@ -159,7 +160,8 @@ public ClusterModule( shardsAllocator, clusterInfoService, snapshotsInfoService, - settings + settings, + clusterManagerMetrics ); } diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index e188374251d0d..e783ac1578d8f 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -47,7 +47,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.index.IndexModule; import java.io.IOException; import java.util.Collections; @@ -414,7 +413,7 @@ public Builder addBlocks(IndexMetadata indexMetadata) { if (IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.get(indexMetadata.getSettings())) { addIndexBlock(indexName, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); } - if 
(IndexModule.Type.REMOTE_SNAPSHOT.match(indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { + if (indexMetadata.isRemoteSnapshot()) { addIndexBlock(indexName, IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE); } return this; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 5475470b81b93..f77a7ffc8ce8e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -511,11 +511,27 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod assert existingNodes.isEmpty() == false; CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); - if (STRICT.equals(remoteStoreCompatibilityMode)) { - DiscoveryNode existingNode = existingNodes.get(0); + List reposToSkip = new ArrayList<>(1); + Optional remoteRoutingTableNode = existingNodes.stream() + .filter( + node -> node.getAttributes().get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) != null + ) + .findFirst(); + // If none of the existing nodes have routing table repo, then we skip this repo check if present in joining node. + // This ensures a new node with remote routing table repo is able to join the cluster. + if (remoteRoutingTableNode.isEmpty()) { + String joiningNodeRepoName = joiningNode.getAttributes() + .get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY); + if (joiningNodeRepoName != null) { + reposToSkip.add(joiningNodeRepoName); + } + } + + if (STRICT.equals(remoteStoreCompatibilityMode)) { + DiscoveryNode existingNode = remoteRoutingTableNode.orElseGet(() -> existingNodes.get(0)); if (joiningNode.isRemoteStoreNode()) { - ensureRemoteStoreNodesCompatibility(joiningNode, existingNode); + ensureRemoteStoreNodesCompatibility(joiningNode, existingNode, reposToSkip); } else { if (existingNode.isRemoteStoreNode()) { throw new IllegalStateException( @@ -537,19 +553,25 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod throw new IllegalStateException(reason); } if (joiningNode.isRemoteStoreNode()) { - Optional remoteDN = existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); - remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode)); + Optional remoteDN = remoteRoutingTableNode.isPresent() + ? 
remoteRoutingTableNode + : existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode, reposToSkip)); } } } } - private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNode existingNode) { + private static void ensureRemoteStoreNodesCompatibility( + DiscoveryNode joiningNode, + DiscoveryNode existingNode, + List reposToSkip + ) { if (joiningNode.isRemoteStoreNode()) { if (existingNode.isRemoteStoreNode()) { RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); - if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { + if (existingRemoteStoreNodeAttribute.equalsWithRepoSkip(joiningRemoteStoreNodeAttribute, reposToSkip) == false) { throw new IllegalStateException( "a remote store node [" + joiningNode diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index eae59f0049a66..2c305d1b2e69f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -65,6 +65,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; +import org.opensearch.index.IndexModule; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.indices.replication.common.ReplicationType; @@ -636,6 +637,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { static final String KEY_SYSTEM = "system"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store"; + public static final String TRANSLOG_METADATA_KEY = "translog_metadata"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; @@ -682,6 +684,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final ActiveShardCount waitForActiveShards; private final Map rolloverInfos; private final boolean isSystem; + private final boolean isRemoteSnapshot; private IndexMetadata( final Index index, @@ -742,6 +745,7 @@ private IndexMetadata( this.waitForActiveShards = waitForActiveShards; this.rolloverInfos = Collections.unmodifiableMap(rolloverInfos); this.isSystem = isSystem; + this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -1207,6 +1211,10 @@ public boolean isSystem() { return isSystem; } + public boolean isRemoteSnapshot() { + return isRemoteSnapshot; + } + public static Builder builder(String index) { return new Builder(index); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index ad83b5b801193..81416a79ef9c3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -180,11 +180,6 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio // handling any 
Exception is caller's responsibility return parser.namedObject(Custom.class, name, null); } - - static Custom fromXContent(XContentParser parser) throws IOException { - String currentFieldName = parser.currentName(); - return fromXContent(parser, currentFieldName); - } } public static final Setting DEFAULT_REPLICA_COUNT_SETTING = Setting.intSetting( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 121f8d935cf48..16edec112f123 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -89,10 +89,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.remote.RemoteStoreCustomMetadataResolver; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.remote.RemoteStorePathStrategyResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -104,6 +104,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -177,7 +178,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathStrategyResolver remoteStorePathStrategyResolver; + private final RemoteStoreCustomMetadataResolver remoteStoreCustomMetadataResolver; public MetadataCreateIndexService( final Settings settings, @@ -193,7 +194,8 @@ public MetadataCreateIndexService( final SystemIndices systemIndices, final boolean forbidPrivateIndexSettings, final AwarenessReplicaBalance awarenessReplicaBalance, - final RemoteStoreSettings remoteStoreSettings + final RemoteStoreSettings remoteStoreSettings, + final Supplier repositoriesServiceSupplier ) { this.settings = settings; this.clusterService = clusterService; @@ -212,8 +214,8 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); Supplier minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion(); - remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(remoteStoreSettings, minNodeVersionSupplier) + remoteStoreCustomMetadataResolver = isRemoteDataAttributePresent(settings) + ? 
new RemoteStoreCustomMetadataResolver(remoteStoreSettings, minNodeVersionSupplier, repositoriesServiceSupplier, settings) : null; } @@ -562,7 +564,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - addRemoteStorePathStrategyInCustomData(tmpImdBuilder, true); + addRemoteStoreCustomMetadata(tmpImdBuilder, true); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -572,13 +574,13 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( } /** - * Adds the remote store path type information in custom data of index metadata. + * Adds 1) the remote store path type and 2) whether the checkpoint (ckp) is stored as translog metadata to the custom data of the index metadata. * * @param tmpImdBuilder index metadata builder. * @param assertNullOldType flag to verify that the old remote store path type is null */ - public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { - if (remoteStorePathStrategyResolver == null) { + public void addRemoteStoreCustomMetadata(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { + if (remoteStoreCustomMetadataResolver == null) { return; } // It is possible that remote custom data exists already. In such cases, we need to only update the path type @@ -586,14 +588,21 @@ public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdB Map<String, String> existingCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); assert assertNullOldType == false || Objects.isNull(existingCustomData); - // Determine the path type for use using the remoteStorePathResolver. - RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get(); Map<String, String> remoteCustomData = new HashMap<>(); + + // Determine whether the ckp is stored as translog metadata + boolean isTranslogMetadataEnabled = remoteStoreCustomMetadataResolver.isTranslogMetadataEnabled(); + remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(isTranslogMetadataEnabled)); + + // Determine the path type to use via the remoteStoreCustomMetadataResolver.
+ RemoteStorePathStrategy newPathStrategy = remoteStoreCustomMetadataResolver.getPathStrategy(); remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name()); if (Objects.nonNull(newPathStrategy.getHashAlgorithm())) { remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name()); } - logger.trace(() -> new ParameterizedMessage("Added newStrategy={}, replaced oldStrategy={}", remoteCustomData, existingCustomData)); + logger.trace( + () -> new ParameterizedMessage("Added newCustomData={}, replaced oldCustomData={}", remoteCustomData, existingCustomData) + ); tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 9b52bdd1b16c5..4b3dc7964a87b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -51,8 +51,10 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.EnumSet; import java.util.List; +import java.util.stream.Collectors; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; @@ -164,6 +166,40 @@ public boolean equalsIgnoreGenerations(@Nullable RepositoriesMetadata other) { return true; } + /** + * Checks if this instance and the given instance share the same repositories, with an option to skip the check for a given list of repos. + * This supports cases where certain repositories (for example, a newly introduced remote routing table repository) should be excluded from the comparison. + * @param other other repositories metadata + * @param reposToSkip list of repos to skip during the equality check + * @return {@code true} iff both instances contain the same repositories apart from differences in generations, not including repos provided in reposToSkip.
+ */ + public boolean equalsIgnoreGenerationsWithRepoSkip(@Nullable RepositoriesMetadata other, List reposToSkip) { + if (other == null) { + return false; + } + List currentRepositories = repositories.stream() + .filter(repo -> !reposToSkip.contains(repo.name())) + .collect(Collectors.toList()); + List otherRepositories = other.repositories.stream() + .filter(repo -> !reposToSkip.contains(repo.name())) + .collect(Collectors.toList()); + + if (otherRepositories.size() != currentRepositories.size()) { + return false; + } + // Sort repos by name for ordered comparison + Comparator compareByName = (o1, o2) -> o1.name().compareTo(o2.name()); + currentRepositories.sort(compareByName); + otherRepositories.sort(compareByName); + + for (int i = 0; i < currentRepositories.size(); i++) { + if (currentRepositories.get(i).equalsIgnoreGenerations(otherRepositories.get(i)) == false) { + return false; + } + } + return true; + } + @Override public int hashCode() { return repositories.hashCode(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 6a95c98815698..6158461c7d4e9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -44,7 +44,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; @@ -242,9 +241,7 @@ public GroupShardsIterator searchShards( final Set set = new HashSet<>(shards.size()); for (IndexShardRoutingTable shard : shards) { IndexMetadata indexMetadataForShard = indexMetadata(clusterState, shard.shardId.getIndex().getName()); - if (IndexModule.Type.REMOTE_SNAPSHOT.match( - indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) - ) && (preference == null || preference.isEmpty())) { + if (indexMetadataForShard.isRemoteSnapshot() && (preference == null || preference.isEmpty())) { preference = Preference.PRIMARY.type(); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java index a4ff237460e28..db10ad61c7d6d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java @@ -11,8 +11,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.allocation.RoutingAllocation; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.IndexModule; /** * {@link RoutingPool} defines the different node types based on the assigned capabilities. The methods @@ -60,10 +58,6 @@ public static RoutingPool getShardPool(ShardRouting shard, RoutingAllocation all * @return {@link RoutingPool} for the given index. */ public static RoutingPool getIndexPool(IndexMetadata indexMetadata) { - Settings indexSettings = indexMetadata.getSettings(); - if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { - return REMOTE_CAPABLE; - } - return LOCAL_ONLY; + return indexMetadata.isRemoteSnapshot() ? 
REMOTE_CAPABLE : LOCAL_ONLY; } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 71e562253bf58..3864e282a310b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.RestoreInProgress; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -56,10 +57,12 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.PriorityComparator; import org.opensearch.gateway.ShardsBatchGatewayAllocator; import org.opensearch.snapshots.SnapshotsInfoService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import java.util.ArrayList; import java.util.Collections; @@ -96,6 +99,7 @@ public class AllocationService { private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; private SnapshotsInfoService snapshotsInfoService; + private final ClusterManagerMetrics clusterManagerMetrics; // only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator public AllocationService( @@ -105,7 +109,13 @@ public AllocationService( ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService ) { - this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService); + this( + allocationDeciders, + shardsAllocator, + clusterInfoService, + snapshotsInfoService, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); setExistingShardsAllocators(Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator)); } @@ -113,9 +123,10 @@ public AllocationService( AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService, - SnapshotsInfoService snapshotsInfoService + SnapshotsInfoService snapshotsInfoService, + ClusterManagerMetrics clusterManagerMetrics ) { - this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService, Settings.EMPTY); + this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService, Settings.EMPTY, clusterManagerMetrics); } public AllocationService( @@ -123,14 +134,15 @@ public AllocationService( ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, - Settings settings - + Settings settings, + ClusterManagerMetrics clusterManagerMetrics ) { this.allocationDeciders = allocationDeciders; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; this.snapshotsInfoService = snapshotsInfoService; this.settings = settings; + this.clusterManagerMetrics = clusterManagerMetrics; } /** @@ -550,11 +562,15 @@ private void reroute(RoutingAllocation allocation) { assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() : "auto-expand replicas out of sync 
with number of nodes in the cluster"; assert assertInitialized(); - + long rerouteStartTimeNS = System.nanoTime(); removeDelayMarkers(allocation); allocateExistingUnassignedShards(allocation); // try to allocate existing shard copies first shardsAllocator.allocate(allocation); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.rerouteHistogram, + (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - rerouteStartTimeNS)) + ); assert RoutingNodes.assertShardStats(allocation.routingNodes()); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index ddcccd597e894..2431f57a6a1f9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -176,7 +176,7 @@ public Metadata applyChanges(Metadata oldMetadata, RoutingTable newRoutingTable, oldMetadata.settings(), logger ); - migrationImdUpdater.maybeUpdateRemoteStorePathStrategy(indexMetadataBuilder, index.getName()); + migrationImdUpdater.maybeUpdateRemoteStoreCustomMetadata(indexMetadataBuilder, index.getName()); migrationImdUpdater.maybeAddRemoteIndexSettings(indexMetadataBuilder, index.getName()); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2c7df6b81e676..efa5115939d3c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -54,6 +54,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.snapshots.SnapshotShardSizeInfo; @@ -68,7 +69,6 @@ import static org.opensearch.cluster.routing.RoutingPool.getShardPool; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING; -import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially @@ -109,11 +109,13 @@ public class DiskThresholdDecider extends AllocationDecider { private final DiskThresholdSettings diskThresholdSettings; private final boolean enableForSingleDataNode; + private final FileCacheSettings fileCacheSettings; public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) { this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); assert Version.CURRENT.major < 9 : "remove enable_for_single_data_node in 9"; this.enableForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings); + this.fileCacheSettings = new FileCacheSettings(settings, clusterSettings); } /** @@ -179,6 +181,12 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing The following block enables allocation for remote shards within safeguard limits of the 
filecache. */ if (REMOTE_CAPABLE.equals(getNodePool(node)) && REMOTE_CAPABLE.equals(getShardPool(shardRouting, allocation))) { + final double dataToFileCacheSizeRatio = fileCacheSettings.getRemoteDataRatio(); + // we don't need to check the ratio + if (dataToFileCacheSizeRatio <= 0.1f) { + return Decision.YES; + } + final List remoteShardsOnNode = StreamSupport.stream(node.spliterator(), false) .filter(shard -> shard.primary() && REMOTE_CAPABLE.equals(getShardPool(shard, allocation))) .collect(Collectors.toList()); @@ -199,7 +207,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing final FileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); final long nodeCacheSize = fileCacheStats != null ? fileCacheStats.getTotal().getBytes() : 0; final long totalNodeRemoteShardSize = currentNodeRemoteShardSize + shardSize; - final double dataToFileCacheSizeRatio = DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(allocation.metadata().settings()); if (dataToFileCacheSizeRatio > 0.0f && totalNodeRemoteShardSize > dataToFileCacheSizeRatio * nodeCacheSize) { return allocation.decision( Decision.NO, @@ -208,6 +215,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing ); } return Decision.YES; + } else if (REMOTE_CAPABLE.equals(getShardPool(shardRouting, allocation))) { + return Decision.NO; } Map usages = clusterInfo.getNodeMostAvailableDiskUsages(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 26a04de31ce39..61e7aaed5ecff 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -44,7 +44,6 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import java.util.Locale; import java.util.function.BiFunction; import static org.opensearch.cluster.routing.allocation.decider.Decision.THROTTLE; @@ -211,20 +210,9 @@ private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNo allocation, currentInRecoveries, replicasInitialRecoveries, - (x, y) -> getInitialPrimaryNodeOutgoingRecoveries(x, y), + this::getInitialPrimaryNodeOutgoingRecoveries, replicasInitialRecoveries, - String.format( - Locale.ROOT, - "[%s=%d]", - CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), - replicasInitialRecoveries - ), - String.format( - Locale.ROOT, - "[%s=%d]", - CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), - replicasInitialRecoveries - ) + true ); } @@ -238,22 +226,9 @@ private Decision allocateNonInitialShardCopies(ShardRouting shardRouting, Routin allocation, currentInRecoveries, concurrentIncomingRecoveries, - (x, y) -> getPrimaryNodeOutgoingRecoveries(x, y), + this::getPrimaryNodeOutgoingRecoveries, concurrentOutgoingRecoveries, - String.format( - Locale.ROOT, - "[%s=%d] (can also be set via [%s])", - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), - concurrentIncomingRecoveries, - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() - ), - String.format( - Locale.ROOT, - "[%s=%d] (can also be set via [%s])", - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), - 
concurrentOutgoingRecoveries, - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() - ) + false ); } @@ -274,18 +249,30 @@ private Decision allocateShardCopies( int inRecoveriesLimit, BiFunction primaryNodeOutRecoveriesFunc, int outRecoveriesLimit, - String incomingRecoveriesSettingMsg, - String outGoingRecoveriesSettingMsg + boolean isInitialShardCopies ) { // Allocating a shard to this node will increase the incoming recoveries if (currentInRecoveries >= inRecoveriesLimit) { - return allocation.decision( - THROTTLE, - NAME, - "reached the limit of incoming shard recoveries [%d], cluster setting %s", - currentInRecoveries, - incomingRecoveriesSettingMsg - ); + if (isInitialShardCopies) { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d]", + currentInRecoveries, + CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), + inRecoveriesLimit + ); + } else { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d] (can also be set via [%s])", + currentInRecoveries, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), + inRecoveriesLimit, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() + ); + } } else { // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); @@ -294,14 +281,30 @@ private Decision allocateShardCopies( } int primaryNodeOutRecoveries = primaryNodeOutRecoveriesFunc.apply(shardRouting, allocation); if (primaryNodeOutRecoveries >= outRecoveriesLimit) { - return allocation.decision( - THROTTLE, - NAME, - "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + "cluster setting %s", - primaryNodeOutRecoveries, - primaryShard.currentNodeId(), - outGoingRecoveriesSettingMsg - ); + if (isInitialShardCopies) { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + + "cluster setting [%s=%d]", + primaryNodeOutRecoveries, + primaryShard.currentNodeId(), + CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), + inRecoveriesLimit + ); + } else { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + + "cluster setting [%s=%d] (can also be set via [%s])", + primaryNodeOutRecoveries, + primaryShard.currentNodeId(), + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), + outRecoveriesLimit, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() + ); + } } else { return allocation.decision( YES, diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java index 81cf94f640163..44c32f7830ae4 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -24,6 +24,7 @@ import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import 
org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Setting; @@ -70,7 +71,7 @@ * * @opensearch.internal */ -public class RemoteRoutingTableService implements Closeable { +public class RemoteRoutingTableService extends AbstractLifecycleComponent { /** * Cluster setting to specify if routing table should be published to remote store @@ -289,16 +290,17 @@ public static DiffableUtils.MapDiff(); @@ -132,6 +142,7 @@ public ClusterApplierService(String nodeName, Settings settings, ClusterSettings CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold ); + this.clusterManagerMetrics = clusterManagerMetrics; } private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { @@ -597,7 +608,7 @@ private void callClusterStateAppliers(ClusterChangedEvent clusterChangedEvent, S callClusterStateAppliers(clusterChangedEvent, stopWatch, lowPriorityStateAppliers); } - private static void callClusterStateAppliers( + private void callClusterStateAppliers( ClusterChangedEvent clusterChangedEvent, StopWatch stopWatch, Collection clusterStateAppliers @@ -605,7 +616,13 @@ private static void callClusterStateAppliers( for (ClusterStateApplier applier : clusterStateAppliers) { logger.trace("calling [{}] with change to version [{}]", applier, clusterChangedEvent.state().version()); try (TimingHandle ignored = stopWatch.timing("running applier [" + applier + "]")) { + long applierStartTimeNS = System.nanoTime(); applier.applyClusterState(clusterChangedEvent); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateAppliersHistogram, + (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - applierStartTimeNS)), + Optional.of(Tags.create().addTag("Operation", applier.getClass().getSimpleName())) + ); } } } @@ -624,7 +641,13 @@ private void callClusterStateListener( try { logger.trace("calling [{}] with change to version [{}]", listener, clusterChangedEvent.state().version()); try (TimingHandle ignored = stopWatch.timing("notifying listener [" + listener + "]")) { + long listenerStartTimeNS = System.nanoTime(); listener.clusterChanged(clusterChangedEvent); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateListenersHistogram, + (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - listenerStartTimeNS)), + Optional.of(Tags.create().addTag("Operation", listener.getClass().getSimpleName())) + ); } } catch (Exception ex) { logger.warn("failed to notify ClusterStateListener", ex); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java index e9224596e048d..eaedb36a59f1e 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.service; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -20,7 +21,12 @@ */ @PublicApi(since = "2.2.0") public class ClusterManagerService extends MasterService { - public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - 
super(settings, clusterSettings, threadPool); + public ClusterManagerService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { + super(settings, clusterSettings, threadPool, clusterManagerMetrics); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index aa7766979e851..fa61375e85c25 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.service; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; @@ -91,12 +92,17 @@ public class ClusterService extends AbstractLifecycleComponent { private IndexingPressureService indexingPressureService; - public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public ClusterService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { this( settings, clusterSettings, - new ClusterManagerService(settings, clusterSettings, threadPool), - new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool) + new ClusterManagerService(settings, clusterSettings, threadPool, clusterManagerMetrics), + new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool, clusterManagerMetrics) ); } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index af3e4f8437c43..6436dcfe33003 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.AckedClusterStateTaskListener; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Builder; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -70,6 +71,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.discovery.Discovery; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -79,6 +81,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -135,8 +138,14 @@ public class MasterService extends AbstractLifecycleComponent { protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; private final ClusterManagerThrottlingStats throttlingStats; private final ClusterStateStats stateStats; + private final ClusterManagerMetrics clusterManagerMetrics; - public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public MasterService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + 
ClusterManagerMetrics clusterManagerMetrics + ) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); this.slowTaskLoggingThreshold = CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); @@ -154,6 +163,7 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP ); this.stateStats = new ClusterStateStats(); this.threadPool = threadPool; + this.clusterManagerMetrics = clusterManagerMetrics; } private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { @@ -303,6 +313,12 @@ private void runTasks(TaskInputs taskInputs) { final TimeValue computationTime = getTimeSince(computationStartTime); logExecutionTime(computationTime, "compute cluster state update", summary); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateComputeHistogram, + (double) computationTime.getMillis(), + Optional.of(Tags.create().addTag("Operation", taskInputs.executor.getClass().getSimpleName())) + ); + if (taskOutputs.clusterStateUnchanged()) { final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); taskOutputs.notifySuccessfulTasksOnUnchangedClusterState(); @@ -361,6 +377,7 @@ protected boolean blockingAllowed() { final long durationMillis = getTimeSince(startTimeNanos).millis(); stateStats.stateUpdateTook(durationMillis); stateStats.stateUpdated(); + clusterManagerMetrics.recordLatency(clusterManagerMetrics.clusterStatePublishHistogram, (double) durationMillis); } catch (Exception e) { stateStats.stateUpdateFailed(); onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 4f5f8d4b1ef5f..a2e4199029ef4 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -80,16 +80,16 @@ public interface BlobContainer { InputStream readBlob(String blobName) throws IOException; /** - * Creates a new {@link FetchBlobResult} for the given blob name. + * Creates a new {@link InputStreamWithMetadata} for the given blob name. * * @param blobName * The name of the blob to get an {@link InputStream} for. - * @return The {@link FetchBlobResult} of the blob. + * @return The {@link InputStreamWithMetadata} of the blob. * @throws NoSuchFileException if the blob does not exist * @throws IOException if the blob can not be read. 
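 * <p>
 * (Editorial aside, not part of the patch: since the renamed wrapper now implements
 * {@link java.io.Closeable} — see InputStreamWithMetadata below — call sites can release the
 * underlying stream with try-with-resources. {@code container} and the blob name are
 * hypothetical, purely for illustration:
 * <pre>{@code
 * try (InputStreamWithMetadata blob = container.readBlobWithMetadata("segment_1")) {
 *     Map<String, String> metadata = blob.getMetadata();
 *     // consume blob.getInputStream(); close() releases the stream
 * }
 * }</pre>)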
*/ @ExperimentalApi - default FetchBlobResult readBlobWithMetadata(String blobName) throws IOException { + default InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { throw new UnsupportedOperationException("readBlobWithMetadata is not implemented yet"); }; diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index 8ce8ec8e01abe..406ccc6aa4a18 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -71,6 +71,13 @@ default Map> extendedStats() { */ default void reload(RepositoryMetadata repositoryMetadata) {} + /** + * Returns a boolean indicating if blobStore has object metadata support enabled + */ + default boolean isBlobMetadataEnabled() { + return false; + } + /** * Metrics for BlobStore interactions */ diff --git a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java b/server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java similarity index 74% rename from server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java rename to server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java index 55aca771b586c..aa307e260e033 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java +++ b/server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java @@ -10,6 +10,8 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.io.Closeable; +import java.io.IOException; import java.io.InputStream; import java.util.Map; @@ -20,7 +22,7 @@ * @opensearch.experimental */ @ExperimentalApi -public class FetchBlobResult { +public class InputStreamWithMetadata implements Closeable { /** * Downloaded blob InputStream @@ -40,9 +42,15 @@ public Map getMetadata() { return metadata; } - public FetchBlobResult(InputStream inputStream, Map metadata) { + public InputStreamWithMetadata(InputStream inputStream, Map metadata) { this.inputStream = inputStream; this.metadata = metadata; } + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + } + } } diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java index 07c75eab34194..dd94dbf61debb 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java @@ -8,6 +8,7 @@ package org.opensearch.common.cache.service; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.cache.CacheType; @@ -51,6 +52,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NodesStatsRequest.Metric.CACHE_STATS.metricName()); for (CacheType type : statsByCache.keySet()) { if (flags.getIncludeCaches().contains(type)) { builder.startObject(type.getValue()); @@ -58,6 +60,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } } + builder.endObject(); return builder; } @@ -77,4 +80,10 @@ public boolean 
equals(Object o) { public int hashCode() { return Objects.hash(statsByCache, flags); } + + // Get the immutable cache stats for a given cache, used to avoid having to process XContent in tests. + // Safe to expose publicly as the ImmutableCacheStatsHolder can't be modified after its creation. + public ImmutableCacheStatsHolder getStatsByCache(CacheType cacheType) { + return statsByCache.get(cacheType); + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 20ed4558ae603..bd93f9d925641 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -120,7 +120,7 @@ import org.opensearch.index.ShardIndexingPressureStore; import org.opensearch.index.remote.RemoteStorePressureSettings; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; -import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; @@ -693,7 +693,7 @@ public void apply(Settings value, Settings current, Settings previous) { // Settings related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, - FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, + FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, // Settings related to Remote Refresh Segment Pressure RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED, @@ -719,6 +719,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, RemoteClusterStateService.REMOTE_STATE_READ_TIMEOUT_SETTING, RemoteIndexMetadataManager.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, RemoteManifestManager.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, @@ -744,9 +745,11 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, // Remote Routing table settings RemoteRoutingTableService.REMOTE_ROUTING_TABLE_ENABLED_SETTING diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 255c1c87f0d89..238df1bd90113 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -34,9 +34,9 @@ protected FeatureFlagSettings( FeatureFlags.IDENTITY_SETTING, FeatureFlags.TELEMETRY_SETTING, FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, -
FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING, + FeatureFlags.TIERED_REMOTE_INDEX_SETTING, FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, FeatureFlags.PLUGGABLE_CACHE_SETTING, - FeatureFlags.REMOTE_ROUTING_TABLE_EXPERIMENTAL_SETTING + FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 1da131ab2f56c..82f43921d2d28 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -56,10 +56,10 @@ public class FeatureFlags { public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; /** - * Gates the functionality of writeable remote index + * Gates the functionality of remote index having the capability to move across different tiers * Once the feature is ready for release, this feature flag can be removed. */ - public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled"; + public static final String TIERED_REMOTE_INDEX = "opensearch.experimental.feature.tiered_remote_index.enabled"; /** * Gates the functionality of pluggable cache. @@ -70,7 +70,7 @@ public class FeatureFlags { /** * Gates the functionality of remote routing table. */ - public static final String REMOTE_ROUTING_TABLE_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.routing.enabled"; + public static final String REMOTE_PUBLICATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.publication.enabled"; public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, @@ -90,30 +90,25 @@ public class FeatureFlags { Property.NodeScope ); - public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( - WRITEABLE_REMOTE_INDEX, - false, - Property.NodeScope - ); + public static final Setting TIERED_REMOTE_INDEX_SETTING = Setting.boolSetting(TIERED_REMOTE_INDEX, false, Property.NodeScope); public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); - public static final Setting REMOTE_ROUTING_TABLE_EXPERIMENTAL_SETTING = Setting.boolSetting( - REMOTE_ROUTING_TABLE_EXPERIMENTAL, - true, + public static final Setting REMOTE_PUBLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_PUBLICATION_EXPERIMENTAL, + false, Property.NodeScope ); - private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, EXTENSIONS_SETTING, IDENTITY_SETTING, TELEMETRY_SETTING, DATETIME_FORMATTER_CACHING_SETTING, - WRITEABLE_REMOTE_INDEX_SETTING, + TIERED_REMOTE_INDEX_SETTING, PLUGGABLE_CACHE_SETTING, - REMOTE_ROUTING_TABLE_EXPERIMENTAL_SETTING + REMOTE_PUBLICATION_EXPERIMENTAL_SETTING ); /** * Should store the settings from opensearch.yml. 
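[Editorial aside, not part of the patch] The two renames above also flip the remote-publication default from true to false. A node-scope flag like these is read through FeatureFlags.isEnabled, the same helper this patch uses elsewhere for SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY. A minimal sketch of how the renamed flags behave on a node that only opts into tiered remote index:

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;

public class RenamedFlagsSketch {
    public static void main(String[] args) {
        // Equivalent to enabling the flag in opensearch.yml
        Settings nodeSettings = Settings.builder().put(FeatureFlags.TIERED_REMOTE_INDEX, true).build();
        FeatureFlags.initializeFeatureFlags(nodeSettings);
        System.out.println(FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX));             // true (opted in)
        System.out.println(FeatureFlags.isEnabled(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL)); // false (new default)
    }
}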
diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index d2de78ffac965..3c0797cd450d2 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -117,14 +117,17 @@ public void cleanCaches() { // for tests protected ShardsBatchGatewayAllocator() { + this(DEFAULT_SHARD_BATCH_SIZE); + } + + protected ShardsBatchGatewayAllocator(long batchSize) { this.rerouteService = null; this.batchStartedAction = null; this.primaryShardBatchAllocator = null; this.batchStoreAction = null; this.replicaShardBatchAllocator = null; - this.maxBatchSize = DEFAULT_SHARD_BATCH_SIZE; + this.maxBatchSize = batchSize; } - // for tests @Override @@ -228,13 +231,13 @@ protected Set createAndUpdateBatches(RoutingAllocation allocation, boole batchEntry.getValue().getBatchedShards().forEach(shardId -> currentBatchedShards.put(shardId, batchEntry.getKey())); } - Set newShardsToBatch = Sets.newHashSet(); + Map newShardsToBatch = new HashMap<>(); Set batchedShardsToAssign = Sets.newHashSet(); // add all unassigned shards to the batch if they are not already in a batch unassigned.forEach(shardRouting -> { if ((currentBatchedShards.containsKey(shardRouting.shardId()) == false) && (shardRouting.primary() == primary)) { assert shardRouting.unassigned(); - newShardsToBatch.add(shardRouting); + newShardsToBatch.put(shardRouting.shardId(), shardRouting); } // if shard is already batched update to latest shardRouting information in the batches // Replica shard assignment can be cancelled if we get a better match. These ShardRouting objects also @@ -262,7 +265,7 @@ else if (shardRouting.primary() == primary) { refreshShardBatches(currentBatches, batchedShardsToAssign, primary); - Iterator iterator = newShardsToBatch.iterator(); + Iterator iterator = newShardsToBatch.values().iterator(); assert maxBatchSize > 0 : "Shards batch size must be greater than 0"; long batchSize = maxBatchSize; diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 4b5bcdeb7b6cf..26b1d0c4de98f 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -51,7 +51,6 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -104,6 +103,7 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; + /** * A Service which provides APIs to upload and download cluster metadata from remote store. 
* @@ -142,7 +142,8 @@ public class RemoteClusterStateService implements Closeable { private final ThreadPool threadpool; private final List indexMetadataUploadListeners; private BlobStoreRepository blobStoreRepository; - private RemoteRoutingTableService remoteRoutingTableService; + private BlobStoreTransferService blobStoreTransferService; + private Optional remoteRoutingTableService; private volatile TimeValue slowWriteLoggingThreshold; private final RemotePersistenceStats remoteStateStats; @@ -152,7 +153,6 @@ public class RemoteClusterStateService implements Closeable { private RemoteClusterStateAttributesManager remoteClusterStateAttributesManager; private RemoteManifestManager remoteManifestManager; private ClusterSettings clusterSettings; - private BlobStoreTransferService blobStoreTransferService; private final String CLUSTER_STATE_UPLOAD_TIME_LOG_STRING = "writing cluster state for version [{}] took [{}ms]"; private final String METADATA_UPDATE_LOG_STRING = "wrote metadata for [{}] indices and skipped [{}] unchanged " + "indices, coordination metadata updated : [{}], settings metadata updated : [{}], templates metadata " @@ -198,13 +198,9 @@ public RemoteClusterStateService( clusterSettings.addSettingsUpdateConsumer(REMOTE_STATE_READ_TIMEOUT_SETTING, this::setRemoteClusterStateEnabled); this.remoteStateStats = new RemotePersistenceStats(); - if (isRemoteRoutingTableEnabled(settings)) { - this.remoteRoutingTableService = new RemoteRoutingTableService(repositoriesService, - settings, threadPool); - logger.info("REMOTE ROUTING ENABLED"); - } else { - logger.info("REMOTE ROUTING DISABLED"); - } + this.remoteRoutingTableService = isRemoteRoutingTableEnabled(settings) + ? Optional.of(new RemoteRoutingTableService(repositoriesService, settings, threadPool)) + : Optional.empty(); this.lastCleanupAttemptState = 0; this.isClusterManagerNode = DiscoveryNode.isClusterManagerNode(settings); this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService); @@ -303,6 +299,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( // remove all the customs which are present currently customsToBeDeletedFromRemote.remove(custom); } + final Map indicesToBeDeletedFromRemote = new HashMap<>(previousClusterState.metadata().indices()); for (final String custom : clusterState.customs().keySet()) { // remove all the custom which are present currently @@ -397,8 +394,8 @@ public ClusterMetadataManifest writeIncrementalMetadata( clusterStateCustomsToBeDeleted.keySet().forEach(allUploadedCustomMap::remove); List allUploadedIndicesRouting = new ArrayList<>(); - if (remoteRoutingTableService != null) { - allUploadedIndicesRouting = remoteRoutingTableService.getAllUploadedIndicesRouting(previousManifest, + if (remoteRoutingTableService.isPresent()) { + allUploadedIndicesRouting = remoteRoutingTableService.get().getAllUploadedIndicesRouting(previousManifest, uploadedMetadataResults.uploadedIndicesRoutingMetadata, indicesToBeDeletedFromRemote.keySet()); } @@ -464,8 +461,6 @@ public ClusterMetadataManifest writeIncrementalMetadata( ); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( - "{} which is above the warn threshold of [{}]; {}", - clusterStateUploadTimeMessage, "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : 
[{}]", @@ -515,7 +510,8 @@ private UploadedMetadataResults writeMetadataInParallel( int totalUploadTasks = indexToUpload.size() + indexMetadataUploadListeners.size() + customToUpload.size() + (uploadCoordinationMetadata ? 1 : 0) + (uploadSettingsMetadata ? 1 : 0) + (uploadTemplateMetadata ? 1 : 0) + (uploadDiscoveryNodes ? 1 : 0) + (uploadClusterBlock ? 1 : 0) + indicesRoutingToUpload.size() + - (uploadTransientSettingMetadata ? 1 : 0) + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ? 1 : 0); + (uploadTransientSettingMetadata ? 1 : 0) + clusterStateCustomToUpload.size(); + // + (uploadHashesOfConsistentSettings ? 1 : 0); CountDownLatch latch = new CountDownLatch(totalUploadTasks); Map> uploadTasks = new ConcurrentHashMap<>(totalUploadTasks); Map results = new ConcurrentHashMap<>(totalUploadTasks); @@ -605,18 +601,18 @@ private UploadedMetadataResults writeMetadataInParallel( ) ); } - if (uploadHashesOfConsistentSettings) { - uploadTasks.put( - HASHES_OF_CONSISTENT_SETTINGS, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new DiffableStringMap(clusterState.metadata().hashesOfConsistentSettings()), - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - listener, - null - ) - ); - } +// if (uploadHashesOfConsistentSettings) { +// uploadTasks.put( +// HASHES_OF_CONSISTENT_SETTINGS, +// remoteGlobalMetadataManager.getAsyncMetadataWriteAction( +// new DiffableStringMap(clusterState.metadata().hashesOfConsistentSettings()), +// clusterState.metadata().version(), +// clusterState.metadata().clusterUUID(), +// listener, +// null +// ) +// ); +// } customToUpload.forEach((key, value) -> { String customComponent = String.join(CUSTOM_DELIMITER, CUSTOM_METADATA, key); uploadTasks.put( @@ -652,7 +648,7 @@ private UploadedMetadataResults writeMetadataInParallel( try { uploadTasks.put( indexRoutingTable.getIndex().getName() + "--indexRouting", - remoteRoutingTableService.getIndexRoutingAsyncAction(clusterState, indexRoutingTable, listener) + remoteRoutingTableService.get().getIndexRoutingAsyncAction(clusterState, indexRoutingTable, listener) ); } catch (IOException e) { e.printStackTrace(); @@ -863,8 +859,8 @@ public void close() throws IOException { if (blobStoreRepository != null) { IOUtils.close(blobStoreRepository); } - if (this.remoteRoutingTableService != null) { - this.remoteRoutingTableService.close(); + if (this.remoteRoutingTableService.isPresent()) { + this.remoteRoutingTableService.get().close(); } } @@ -877,9 +873,7 @@ public void start() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; - if (this.remoteRoutingTableService != null) { - this.remoteRoutingTableService.start(); - } + this.remoteRoutingTableService.ifPresent(RemoteRoutingTableService::start); String clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); RemoteWritableEntityStore globalMetadataBlobStore = new RemoteClusterStateBlobStore<>(getBlobStoreTransferService(), blobStoreRepository, clusterName, threadpool, ThreadPool.Names.GENERIC); @@ -923,8 +917,8 @@ private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; } - //Package private for unit test - RemoteRoutingTableService getRemoteRoutingTableService() { + // Package private for unit test + Optional 
getRemoteRoutingTableService() { return this.remoteRoutingTableService; } @@ -948,7 +942,6 @@ BlobStore getBlobStore() { * @return {@link IndexMetadata} */ public ClusterState getLatestClusterState(String clusterName, String clusterUUID, boolean includeEphemeral) throws IOException { - start(); Optional clusterMetadataManifest = remoteManifestManager.getLatestClusterMetadataManifest( clusterName, clusterUUID @@ -1030,7 +1023,7 @@ private ClusterState readClusterStateInParallel( for (UploadedIndexMetadata indexRouting : indicesRoutingToRead) { asyncMetadataReadActions.add( - remoteRoutingTableService.getAsyncIndexMetadataReadAction( + remoteRoutingTableService.get().getAsyncIndexMetadataReadAction( indexRouting.getUploadedFilename(), new Index(indexRouting.getIndexName(), indexRouting.getIndexUUID()), routingTableLatchedActionListener @@ -1203,6 +1196,7 @@ private ClusterState readClusterStateInParallel( break; case HASHES_OF_CONSISTENT_SETTINGS: metadataBuilder.hashesOfConsistentSettings((DiffableStringMap) remoteReadResult.getObj()); + break; case CLUSTER_STATE_ATTRIBUTE: if (remoteReadResult.getComponentName().equals(DISCOVERY_NODES)) { discoveryNodesBuilder.set(DiscoveryNodes.builder((DiscoveryNodes) remoteReadResult.getObj())); @@ -1267,7 +1261,7 @@ public ClusterState getClusterStateUsingDiff(String clusterName, ClusterMetadata return uploadedIndexMetadataOptional.get(); }).collect(Collectors.toList()); - List updatedIndexRouting = remoteRoutingTableService.getUpdatedIndexRoutingTableMetadata(diff.getIndicesRoutingUpdated(), + List updatedIndexRouting = remoteRoutingTableService.get().getUpdatedIndexRoutingTableMetadata(diff.getIndicesRoutingUpdated(), manifest.getIndicesRouting()); Map updatedCustomMetadata = new HashMap<>(); @@ -1387,6 +1381,22 @@ Set getAllClusterUUIDs(String clusterName) throws IOException { return Collections.unmodifiableSet(clusterUUIDMetadata.keySet()); } + private Map getLatestManifestForAllClusterUUIDs(String clusterName, Set clusterUUIDs) { + Map manifestsByClusterUUID = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e + ); + } + } + return manifestsByClusterUUID; + } + /** * This method creates a valid cluster UUID chain. 
* @@ -1445,8 +1455,9 @@ private List createClusterChain(final Map CUSTOM_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "custom", - METADATA_NAME_FORMAT, - Metadata.Custom::fromXContent - ); - public final ChecksumBlobStoreFormat customBlobStoreFormat; private Custom custom; diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9d8ab6815eecc..6c0ab2f6b0153 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -728,7 +728,6 @@ public static IndexMergePolicy fromString(String text) { private volatile TimeValue remoteTranslogUploadBufferInterval; private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; - private final boolean isRemoteSnapshot; private int remoteTranslogKeepExtraGen; private Version extendedCompatibilitySnapshotVersion; @@ -763,6 +762,7 @@ public static IndexMergePolicy fromString(String text) { private final boolean widenIndexSortType; private final boolean assignedOnRemoteNode; private final RemoteStorePathStrategy remoteStorePathStrategy; + private final boolean isTranslogMetadataEnabled; /** * The maximum age of a retention lease before it is considered expired. @@ -918,9 +918,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); - isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); - if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { + if (isRemoteSnapshot() && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { extendedCompatibilitySnapshotVersion = SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; } else { extendedCompatibilitySnapshotVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); @@ -989,6 +988,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); remoteStorePathStrategy = RemoteStoreUtils.determineRemoteStorePathStrategy(indexMetadata); + isTranslogMetadataEnabled = RemoteStoreUtils.determineTranslogMetadataEnabled(indexMetadata); + setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1275,7 +1276,7 @@ public String getRemoteStoreTranslogRepository() { * Returns true if this is remote/searchable snapshot */ public boolean isRemoteSnapshot() { - return isRemoteSnapshot; + return indexMetadata.isRemoteSnapshot(); } /** @@ -1911,4 +1912,8 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo public RemoteStorePathStrategy getRemoteStorePathStrategy() { return remoteStorePathStrategy; } + + public boolean isTranslogMetadataEnabled() { + return isTranslogMetadataEnabled; + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java index 7ebe4e5f0b0e8..b502e41cbb97b 100644 --- 
a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java @@ -8,6 +8,7 @@ package org.opensearch.index.mapper; +import org.opensearch.Version; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,6 +19,7 @@ import org.opensearch.script.Script; import java.io.IOException; +import java.util.Map; import java.util.Objects; /** @@ -25,10 +27,13 @@ */ @PublicApi(since = "2.14.0") public class DerivedField implements Writeable, ToXContentFragment { - private final String name; private final String type; private final Script script; + private String sourceIndexedField; + private Map properties; + private Boolean ignoreMalformed; + private String format; public DerivedField(String name, String type, Script script) { this.name = name; @@ -40,6 +45,14 @@ public DerivedField(StreamInput in) throws IOException { name = in.readString(); type = in.readString(); script = new Script(in); + if (in.getVersion().onOrAfter(Version.V_2_15_0)) { + if (in.readBoolean()) { + properties = in.readMap(); + } + sourceIndexedField = in.readOptionalString(); + format = in.readOptionalString(); + ignoreMalformed = in.readOptionalBoolean(); + } } @Override @@ -47,6 +60,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(type); script.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_2_15_0)) { + if (properties == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeMap(properties); + } + out.writeOptionalString(sourceIndexedField); + out.writeOptionalString(format); + out.writeOptionalBoolean(ignoreMalformed); + } } @Override @@ -54,6 +78,18 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.startObject(name); builder.field("type", type); builder.field("script", script); + if (properties != null) { + builder.field("properties", properties); + } + if (sourceIndexedField != null) { + builder.field("source_indexed_field", sourceIndexedField); + } + if (format != null) { + builder.field("format", format); + } + if (ignoreMalformed != null) { + builder.field("ignore_malformed", ignoreMalformed); + } builder.endObject(); return builder; } @@ -70,9 +106,41 @@ public Script getScript() { return script; } + public Map getProperties() { + return properties; + } + + public String getSourceIndexedField() { + return sourceIndexedField; + } + + public String getFormat() { + return format; + } + + public boolean getIgnoreMalformed() { + return Boolean.TRUE.equals(ignoreMalformed); + } + + public void setProperties(Map properties) { + this.properties = properties; + } + + public void setSourceIndexedField(String sourceIndexedField) { + this.sourceIndexedField = sourceIndexedField; + } + + public void setFormat(String format) { + this.format = format; + } + + public void setIgnoreMalformed(boolean ignoreMalformed) { + this.ignoreMalformed = ignoreMalformed; + } + @Override public int hashCode() { - return Objects.hash(name, type, script); + return Objects.hash(name, type, script, sourceIndexedField, properties, ignoreMalformed, format); } @Override @@ -84,7 +152,12 @@ public boolean equals(Object obj) { return false; } DerivedField other = (DerivedField) obj; - return Objects.equals(name, other.name) && Objects.equals(type, other.type) && Objects.equals(script, other.script); + return Objects.equals(name, 
other.name) + && Objects.equals(type, other.type) + && Objects.equals(script, other.script) + && Objects.equals(sourceIndexedField, other.sourceIndexedField) + && Objects.equals(properties, other.properties) + && Objects.equals(ignoreMalformed, other.ignoreMalformed) + && Objects.equals(format, other.format); } - } diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeInference.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeInference.java new file mode 100644 index 0000000000000..713bdc4e691cd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeInference.java @@ -0,0 +1,181 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.ReaderUtil; +import org.opensearch.common.Randomness; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.TreeSet; + +/** + * This class performs type inference by analyzing the _source documents. It uses a random sample of documents to infer the field type, similar to dynamic mapping type guessing logic. + * Unlike guessing based on the first document, where field could be missing, this method generates a random sample to make a more accurate inference. + * This approach is especially useful for handling missing fields, which is common in nested fields within derived fields of object types. + * + *
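 * <p>
 * (Editorial aside, not part of the patch: typical call shape for this helper, with
 * {@code mapperService}, {@code indexReader}, and a {@code valueFetcher} for the target field
 * assumed to be in scope, and an illustrative index name:
 * <pre>{@code
 * FieldTypeInference inference = new FieldTypeInference("my-index", mapperService, indexReader);
 * Mapper inferred = inference.infer(valueFetcher); // null when no sampled document contains the field
 * }</pre>)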

The sample size should be chosen carefully to ensure a high probability of selecting at least one document where the field is present. + * However, it's essential to strike a balance because a large sample size can lead to performance issues since each sample document's _source field is loaded and examined until the field is found. + * + *

Determining the sample size ({@code S}) is akin to deciding how many balls to draw from a bin, ensuring a high probability ({@code >=P}) of drawing at least one green ball (documents with the field) from a mixture of {@code R } red balls (documents without the field) and {@code G } green balls: + *

+ * {@code P >= 1 - C(R, S) / C(R + G, S)}
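 * <p>
 * (Editorial check, not part of the patch: because S is tiny relative to R and G, the ratio of
 * binomial coefficients reduces to roughly (R / (R + G))^S; for the example below of 10^7 documents
 * with the field present in 2% of them, S = 149 gives P = 1 - 0.98^149 = 1 - e^(149 * ln 0.98) ≈ 1 - 0.049 ≈ 0.95.)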
+ * Here, {@code C()} represents the binomial coefficient. + * For a high confidence level, we aim for {@code P >= 0.95 }. For example, with {@code 10^7 } documents where the field is present in {@code 2% } of them, the sample size {@code S } should be around 149 to achieve a probability of {@code 0.95}. + */ +public class FieldTypeInference { + private final IndexReader indexReader; + private final String indexName; + private final MapperService mapperService; + // TODO expose using a index setting + private int sampleSize; + private static final int DEFAULT_SAMPLE_SIZE = 150; + private static final int MAX_SAMPLE_SIZE_ALLOWED = 1000; + + public FieldTypeInference(String indexName, MapperService mapperService, IndexReader indexReader) { + this.indexName = indexName; + this.mapperService = mapperService; + this.indexReader = indexReader; + this.sampleSize = DEFAULT_SAMPLE_SIZE; + } + + public void setSampleSize(int sampleSize) { + if (sampleSize > MAX_SAMPLE_SIZE_ALLOWED) { + throw new IllegalArgumentException("sample_size should be less than " + MAX_SAMPLE_SIZE_ALLOWED); + } + this.sampleSize = sampleSize; + } + + public int getSampleSize() { + return sampleSize; + } + + public Mapper infer(ValueFetcher valueFetcher) throws IOException { + RandomSourceValuesGenerator valuesGenerator = new RandomSourceValuesGenerator(sampleSize, indexReader, valueFetcher); + Mapper inferredMapper = null; + while (inferredMapper == null && valuesGenerator.hasNext()) { + List values = valuesGenerator.next(); + if (values == null || values.isEmpty()) { + continue; + } + // always use first value in case of multi value field to infer type + inferredMapper = inferTypeFromObject(values.get(0)); + } + return inferredMapper; + } + + private Mapper inferTypeFromObject(Object o) throws IOException { + if (o == null) { + return null; + } + DocumentMapper mapper = mapperService.documentMapper(); + XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("field", o).endObject(); + BytesReference bytesReference = BytesReference.bytes(builder); + SourceToParse sourceToParse = new SourceToParse(indexName, "_id", bytesReference, JsonXContent.jsonXContent.mediaType()); + ParsedDocument parsedDocument = mapper.parse(sourceToParse); + Mapping mapping = parsedDocument.dynamicMappingsUpdate(); + return mapping.root.getMapper("field"); + } + + private static class RandomSourceValuesGenerator implements Iterator> { + private final ValueFetcher valueFetcher; + private final IndexReader indexReader; + private final SourceLookup sourceLookup; + private final int[] docs; + private int iter; + private int leaf; + private final int MAX_ATTEMPTS_TO_GENERATE_RANDOM_SAMPLES = 10000; + + public RandomSourceValuesGenerator(int sampleSize, IndexReader indexReader, ValueFetcher valueFetcher) { + this.valueFetcher = valueFetcher; + this.indexReader = indexReader; + sampleSize = Math.min(sampleSize, indexReader.numDocs()); + this.docs = getSortedRandomNum( + sampleSize, + indexReader.numDocs(), + Math.max(sampleSize, MAX_ATTEMPTS_TO_GENERATE_RANDOM_SAMPLES) + ); + this.iter = 0; + this.leaf = -1; + this.sourceLookup = new SourceLookup(); + if (hasNext()) { + setNextLeaf(); + } + } + + @Override + public boolean hasNext() { + return iter < docs.length && leaf < indexReader.leaves().size(); + } + + /** + * Ensure hasNext() is called before calling next() + */ + @Override + public List next() { + int docID = docs[iter] - indexReader.leaves().get(leaf).docBase; + if (docID >= indexReader.leaves().get(leaf).reader().numDocs()) { + 
setNextLeaf(); + } + // deleted docs are getting used to infer type, which should be okay? + sourceLookup.setSegmentAndDocument(indexReader.leaves().get(leaf), docs[iter] - indexReader.leaves().get(leaf).docBase); + try { + iter++; + return valueFetcher.fetchValues(sourceLookup); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private void setNextLeaf() { + int readerIndex = ReaderUtil.subIndex(docs[iter], indexReader.leaves()); + if (readerIndex != leaf) { + leaf = readerIndex; + } else { + // this will only happen when leaves are exhausted and readerIndex will be indexReader.leaves()-1. + leaf++; + } + if (leaf < indexReader.leaves().size()) { + valueFetcher.setNextReader(indexReader.leaves().get(leaf)); + } + } + + private static int[] getSortedRandomNum(int sampleSize, int upperBound, int attempts) { + Set generatedNumbers = new TreeSet<>(); + Random random = Randomness.get(); + int itr = 0; + if (upperBound <= 10 * sampleSize) { + List numberList = new ArrayList<>(); + for (int i = 0; i < upperBound; i++) { + numberList.add(i); + } + Collections.shuffle(numberList, random); + generatedNumbers.addAll(numberList.subList(0, sampleSize)); + } else { + while (generatedNumbers.size() < sampleSize && itr++ < attempts) { + int randomNumber = random.nextInt(upperBound); + generatedNumbers.add(randomNumber); + } + } + return generatedNumbers.stream().mapToInt(Integer::valueOf).toArray(); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index fc8654216e187..a1f3894c9f14c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -714,6 +714,13 @@ public boolean isMetadataField(String field) { return mapperRegistry.isMetadataField(field); } + /** + * Returns a set containing the registered metadata fields + */ + public Set getMetadataFields() { + return Collections.unmodifiableSet(mapperRegistry.getMetadataMapperParsers().keySet()); + } + /** * An analyzer wrapper that can lookup fields within the index mappings */ diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 3f97b3918a126..b5ba79632b622 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -401,16 +401,32 @@ protected void doBuild(SearchContext parentSearchContext, InnerHitsContext inner } } String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : nestedObjectMapper.fullPath(); - ObjectMapper parentObjectMapper = queryShardContext.nestedScope().nextLevel(nestedObjectMapper); - NestedInnerHitSubContext nestedInnerHits = new NestedInnerHitSubContext( - name, - parentSearchContext, - parentObjectMapper, - nestedObjectMapper - ); - setupInnerHitsContext(queryShardContext, nestedInnerHits); - queryShardContext.nestedScope().previousLevel(); - innerHitsContext.addInnerHitDefinition(nestedInnerHits); + ObjectMapper parentObjectMapper = queryShardContext.nestedScope().getObjectMapper(); + BitSetProducer parentFilter; + if (parentObjectMapper == null) { + parentFilter = queryShardContext.bitsetFilter(Queries.newNonNestedFilter()); + } else { + parentFilter = queryShardContext.bitsetFilter(parentObjectMapper.nestedTypeFilter()); + } + BitSetProducer previousParentFilter = queryShardContext.getParentFilter(); + try { + queryShardContext.setParentFilter(parentFilter); + queryShardContext.nestedScope().nextLevel(nestedObjectMapper); + try { + NestedInnerHitSubContext nestedInnerHits = new NestedInnerHitSubContext( + name, + parentSearchContext, + parentObjectMapper, + nestedObjectMapper + ); + setupInnerHitsContext(queryShardContext, nestedInnerHits); + innerHitsContext.addInnerHitDefinition(nestedInnerHits); + } finally { + queryShardContext.nestedScope().previousLevel(); + } + } finally { + queryShardContext.setParentFilter(previousParentFilter); + } } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java index 761fa20ea64e5..cc51fcd2f18f6 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java @@ -28,7 +28,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStorePathStrategyDuringMigration; +import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStoreCustomMetadataDuringMigration; import static org.opensearch.index.remote.RemoteStoreUtils.getRemoteStoreRepoName; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -118,7 +118,7 @@ private boolean needsRemoteIndexSettingsUpdate( } /** - * Updates the remote store path strategy metadata for the index when it is migrating to remote. + * Updates the remote store custom metadata for the index when it is migrating to remote. 
* This is run during state change of each shard copy when the cluster is in `MIXED` mode and the direction of migration is `REMOTE_STORE` * Should not interfere with docrep functionality even if the index is in docrep nodes since this metadata * is not used anywhere in the docrep flow @@ -127,20 +127,20 @@ private boolean needsRemoteIndexSettingsUpdate( * @param indexMetadataBuilder Mutated {@link IndexMetadata.Builder} having the previous state updates * @param index index name */ - public void maybeUpdateRemoteStorePathStrategy(IndexMetadata.Builder indexMetadataBuilder, String index) { - if (indexHasRemotePathMetadata(indexMetadata) == false) { - logger.info("Adding remote store path strategy for index [{}] during migration", index); + public void maybeUpdateRemoteStoreCustomMetadata(IndexMetadata.Builder indexMetadataBuilder, String index) { + if (indexHasRemoteCustomMetadata(indexMetadata) == false) { + logger.info("Adding remote store custom data for index [{}] during migration", index); indexMetadataBuilder.putCustom( REMOTE_STORE_CUSTOM_KEY, - determineRemoteStorePathStrategyDuringMigration(clusterSettings, discoveryNodes) + determineRemoteStoreCustomMetadataDuringMigration(clusterSettings, discoveryNodes) ); } else { - logger.debug("Index {} already has remote store path strategy", index); + logger.debug("Index {} already has remote store custom data", index); } } public static boolean indexHasAllRemoteStoreRelatedMetadata(IndexMetadata indexMetadata) { - return indexHasRemoteStoreSettings(indexMetadata.getSettings()) && indexHasRemotePathMetadata(indexMetadata); + return indexHasRemoteStoreSettings(indexMetadata.getSettings()) && indexHasRemoteCustomMetadata(indexMetadata); } /** @@ -167,9 +167,11 @@ public static boolean indexHasRemoteStoreSettings(Settings indexSettings) { * @param indexMetadata Current index metadata * @return true if all above conditions match. false otherwise */ - public static boolean indexHasRemotePathMetadata(IndexMetadata indexMetadata) { + public static boolean indexHasRemoteCustomMetadata(IndexMetadata indexMetadata) { Map customMetadata = indexMetadata.getCustomData(REMOTE_STORE_CUSTOM_KEY); - return Objects.nonNull(customMetadata) && Objects.nonNull(customMetadata.get(PathType.NAME)); + return Objects.nonNull(customMetadata) + && (Objects.nonNull(customMetadata.get(PathType.NAME)) + || Objects.nonNull(customMetadata.get(IndexMetadata.TRANSLOG_METADATA_KEY))); } public static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, String segmentRepository, String translogRepository) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java new file mode 100644 index 0000000000000..e8a0dda5a699e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.function.Supplier; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; + +/** + * Determines the remote store custom metadata, such as the {@link RemoteStorePathStrategy} and the translog metadata flag, at the time of index metadata creation. + * + * @opensearch.internal + */ +@ExperimentalApi +public class RemoteStoreCustomMetadataResolver { + + private final RemoteStoreSettings remoteStoreSettings; + private final Supplier<Version> minNodeVersionSupplier; + private final Supplier<RepositoriesService> repositoriesServiceSupplier; + private final Settings settings; + + public RemoteStoreCustomMetadataResolver( + RemoteStoreSettings remoteStoreSettings, + Supplier<Version> minNodeVersionSupplier, + Supplier<RepositoriesService> repositoriesServiceSupplier, + Settings settings + ) { + this.remoteStoreSettings = remoteStoreSettings; + this.minNodeVersionSupplier = minNodeVersionSupplier; + this.repositoriesServiceSupplier = repositoriesServiceSupplier; + this.settings = settings; + } + + public RemoteStorePathStrategy getPathStrategy() { + PathType pathType; + PathHashAlgorithm pathHashAlgorithm; + // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. + pathType = Version.V_2_14_0.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; + // If the path type is fixed, hash algorithm is not applicable. + pathHashAlgorithm = pathType == PathType.FIXED ? null : remoteStoreSettings.getPathHashAlgorithm(); + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + } + + public boolean isTranslogMetadataEnabled() { + Repository repository; + try { + repository = repositoriesServiceSupplier.get().repository(getRemoteStoreTranslogRepo(settings)); + } catch (RepositoryMissingException ex) { + throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", ex); + } + BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + return Version.V_2_15_0.compareTo(minNodeVersionSupplier.get()) <= 0 + && remoteStoreSettings.isTranslogMetadataEnabled() + && blobStoreRepository.blobStore().isBlobMetadataEnabled(); + } + +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java deleted file mode 100644 index 178de406ed681..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
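Both resolver methods above gate new behavior on the oldest node version in the cluster: the configured path type applies only once every node is at least 2.14, and translog metadata only from 2.15. A toy sketch of that min-node-version gating, using plain integers in place of the real Version type:

```java
import java.util.function.Supplier;

// Sketch of min-node-version gating: the configured value takes effect only once
// every node in the cluster is on at least the feature version; otherwise fall
// back to the legacy behavior. Enum and version numbers are illustrative.
public final class VersionGateSketch {
    enum PathType { FIXED, HASHED_PREFIX }

    static PathType resolvePathType(int featureVersion, Supplier<Integer> minNodeVersion, PathType configured) {
        // Same shape as the resolver: gate on the oldest node in the cluster.
        return featureVersion <= minNodeVersion.get() ? configured : PathType.FIXED;
    }

    public static void main(String[] args) {
        // Mixed cluster where the oldest node is still on 2_13: stay on FIXED.
        System.out.println(resolvePathType(2_14, () -> 2_13, PathType.HASHED_PREFIX)); // FIXED
        // Fully upgraded cluster: the configured type takes effect.
        System.out.println(resolvePathType(2_14, () -> 2_15, PathType.HASHED_PREFIX)); // HASHED_PREFIX
    }
}
```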
- */ - -package org.opensearch.index.remote; - -import org.opensearch.Version; -import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.RemoteStoreSettings; - -import java.util.function.Supplier; - -/** - * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. - * - * @opensearch.internal - */ -@ExperimentalApi -public class RemoteStorePathStrategyResolver { - - private final RemoteStoreSettings remoteStoreSettings; - private final Supplier minNodeVersionSupplier; - - public RemoteStorePathStrategyResolver(RemoteStoreSettings remoteStoreSettings, Supplier minNodeVersionSupplier) { - this.remoteStoreSettings = remoteStoreSettings; - this.minNodeVersionSupplier = minNodeVersionSupplier; - } - - public RemoteStorePathStrategy get() { - PathType pathType; - PathHashAlgorithm pathHashAlgorithm; - // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. - pathType = Version.V_2_14_0.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; - // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : remoteStoreSettings.getPathHashAlgorithm(); - return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 27b1b88034573..9a9de6c819424 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -32,6 +32,7 @@ import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; /** * Utils for remote store @@ -181,25 +182,50 @@ public static RemoteStorePathStrategy determineRemoteStorePathStrategy(IndexMeta return new RemoteStorePathStrategy(RemoteStoreEnums.PathType.FIXED); } + /** + * Determines if translog file object metadata can be used to store checkpoint file data. 
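The method that follows reads the flag back out of the index custom metadata, defaulting to false when the key is absent. A standalone sketch of that lookup, with an illustrative key name in place of IndexMetadata.TRANSLOG_METADATA_KEY:

```java
import java.util.Map;

// Sketch: parse the translog-metadata flag from index custom data, defaulting
// to false so older indices keep the checkpoint-file-per-translog layout.
public final class TranslogFlagSketch {
    static final String TRANSLOG_METADATA_KEY = "translog_metadata"; // illustrative

    static boolean translogMetadataEnabled(Map<String, String> remoteCustomData) {
        if (remoteCustomData != null && remoteCustomData.containsKey(TRANSLOG_METADATA_KEY)) {
            return Boolean.parseBoolean(remoteCustomData.get(TRANSLOG_METADATA_KEY));
        }
        return false; // absent key means the legacy behavior
    }

    public static void main(String[] args) {
        System.out.println(translogMetadataEnabled(null));                                   // false
        System.out.println(translogMetadataEnabled(Map.of(TRANSLOG_METADATA_KEY, "true")));  // true
        System.out.println(translogMetadataEnabled(Map.of(TRANSLOG_METADATA_KEY, "false"))); // false
    }
}
```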
+ */ + public static boolean determineTranslogMetadataEnabled(IndexMetadata indexMetadata) { + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + assert remoteCustomData == null || remoteCustomData.containsKey(IndexMetadata.TRANSLOG_METADATA_KEY); + if (remoteCustomData != null && remoteCustomData.containsKey(IndexMetadata.TRANSLOG_METADATA_KEY)) { + return Boolean.parseBoolean(remoteCustomData.get(IndexMetadata.TRANSLOG_METADATA_KEY)); + } + return false; + } + /** * Generates the remote store path type information to be added to custom data of index metadata during migration * * @param clusterSettings Current Cluster settings from {@link ClusterState} - * @param discoveryNodes Current {@link DiscoveryNodes} from the cluster state + * @param discoveryNodes Current {@link DiscoveryNodes} from the cluster state * @return {@link Map} to be added as custom data in index metadata */ - public static Map determineRemoteStorePathStrategyDuringMigration( + public static Map determineRemoteStoreCustomMetadataDuringMigration( Settings clusterSettings, DiscoveryNodes discoveryNodes ) { + Map remoteCustomData = new HashMap<>(); Version minNodeVersion = discoveryNodes.getMinNodeVersion(); + + // TODO: During the document replication to a remote store migration, there should be a check to determine if the registered + // translog blobstore supports custom metadata or not. + // Currently, the blobStoreMetadataEnabled flag is set to false because the integration tests run on the local file system, which + // does not support custom metadata. + // https://github.com/opensearch-project/OpenSearch/issues/13745 + boolean blobStoreMetadataEnabled = false; + boolean translogMetadata = Version.CURRENT.compareTo(minNodeVersion) <= 0 + && CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.get(clusterSettings) + && blobStoreMetadataEnabled; + + remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(translogMetadata)); + RemoteStoreEnums.PathType pathType = Version.CURRENT.compareTo(minNodeVersion) <= 0 ? CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.get(clusterSettings) : RemoteStoreEnums.PathType.FIXED; RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm = pathType == RemoteStoreEnums.PathType.FIXED ? 
null : CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.get(clusterSettings); - Map remoteCustomData = new HashMap<>(); remoteCustomData.put(RemoteStoreEnums.PathType.NAME, pathType.name()); if (Objects.nonNull(pathHashAlgorithm)) { remoteCustomData.put(RemoteStoreEnums.PathHashAlgorithm.NAME, pathHashAlgorithm.name()); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 18d4a2ca6d639..3517579856d43 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3970,7 +3970,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro new RemoteStoreRefreshListener( this, this.checkpointPublisher, - remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) + remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()), + remoteStoreSettings ) ); } @@ -4976,7 +4977,14 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings); + RemoteFsTranslog.cleanup( + repository, + shardId, + getThreadPool(), + indexSettings.getRemoteStorePathStrategy(), + remoteStoreSettings, + indexSettings().isTranslogMetadataEnabled() + ); } /* @@ -5001,7 +5009,8 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings, logger, - shouldSeedRemoteStore() + shouldSeedRemoteStore(), + indexSettings().isTranslogMetadataEnabled() ); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index bfb841307af49..20afd7b2f3568 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -33,6 +33,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.threadpool.ThreadPool; @@ -45,6 +46,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -89,11 +91,13 @@ public final class RemoteStoreRefreshListener extends ReleasableRetryableRefresh private volatile long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; + private final RemoteStoreSettings remoteStoreSettings; public RemoteStoreRefreshListener( IndexShard indexShard, SegmentReplicationCheckpointPublisher checkpointPublisher, - RemoteSegmentTransferTracker segmentTracker + RemoteSegmentTransferTracker 
segmentTracker, + RemoteStoreSettings remoteStoreSettings ) { super(indexShard.getThreadPool()); logger = Loggers.getLogger(getClass(), indexShard.shardId()); @@ -116,6 +120,7 @@ public RemoteStoreRefreshListener( this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; + this.remoteStoreSettings = remoteStoreSettings; } @Override @@ -286,7 +291,12 @@ public void onFailure(Exception e) { // Start the segments files upload uploadNewSegments(localSegmentsPostRefresh, localSegmentsSizeMap, segmentUploadsCompletedListener); - latch.await(); + if (latch.await( + remoteStoreSettings.getClusterRemoteSegmentTransferTimeout().millis(), + TimeUnit.MILLISECONDS + ) == false) { + throw new SegmentUploadFailedException("Timeout while waiting for remote segment transfer to complete"); + } } catch (EngineException e) { logger.warn("Exception while reading SegmentInfosSnapshot", e); } diff --git a/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java b/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java new file mode 100644 index 0000000000000..bbff399fb71ff --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import java.io.IOException; + +/** + * Exception to be thrown when a segment upload fails. + * + * @opensearch.internal + */ +public class SegmentUploadFailedException extends IOException { + + /** + * Creates a new SegmentUploadFailedException. + * + * @param message error message + */ + public SegmentUploadFailedException(String message) { + super(message); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 2029b461674c7..e61e5ecd4084a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -10,7 +10,6 @@ import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.PublicApi; -import org.opensearch.common.settings.Setting; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.index.store.remote.utils.cache.CacheUsage; @@ -52,21 +51,6 @@ public class FileCache implements RefCountedCache { private final CircuitBreaker circuitBreaker; - /** - * Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for - * the file cache. For example, if 100GB disk space is configured for use as a file cache and the - * remote_data_ratio of 5 is defined, then a total of 500GB of remote data can be loaded as searchable snapshots. - * This is designed to be a safeguard to prevent oversubscribing a cluster. - * Specify a value of zero for no limit, which is the default for compatibility reasons. 
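To make the ratio arithmetic in the javadoc above concrete, here is a small sketch of the safeguard it describes, under the assumption that the check simply compares referenced remote bytes against the cache size times the ratio; the names below are illustrative, not the actual enforcement code.

```java
// Sketch of the remote_data_ratio safeguard: with a 100GB file cache and a
// ratio of 5, at most 500GB of remote data may be referenced; a ratio of 0
// disables the check entirely (the backwards-compatible default).
public final class RemoteDataRatioSketch {
    static boolean withinRemoteDataLimit(long fileCacheBytes, double remoteDataRatio, long referencedRemoteBytes) {
        if (remoteDataRatio == 0.0) {
            return true; // zero means "no limit"
        }
        return referencedRemoteBytes <= (long) (fileCacheBytes * remoteDataRatio);
    }

    public static void main(String[] args) {
        long hundredGb = 100L * 1024 * 1024 * 1024;
        long fiveHundredGb = 5 * hundredGb;
        System.out.println(withinRemoteDataLimit(hundredGb, 5.0, fiveHundredGb));     // true
        System.out.println(withinRemoteDataLimit(hundredGb, 5.0, fiveHundredGb + 1)); // false
        System.out.println(withinRemoteDataLimit(hundredGb, 0.0, Long.MAX_VALUE));    // true
    }
}
```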
- */ - public static final Setting<Double> DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING = Setting.doubleSetting( - "cluster.filecache.remote_data_ratio", - 0.0, - 0.0, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - public FileCache(SegmentedCache<Path, CachedIndexInput> cache, CircuitBreaker circuitBreaker) { this.theCache = cache; this.circuitBreaker = circuitBreaker; diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheSettings.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheSettings.java new file mode 100644 index 0000000000000..76086be932ecb --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheSettings.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.filecache; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Settings related to the file cache + * + * @opensearch.internal + */ +public class FileCacheSettings { + /** + * Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for + * the file cache. For example, if 100GB disk space is configured for use as a file cache and the + * remote_data_ratio of 5 is defined, then a total of 500GB of remote data can be loaded as searchable snapshots. + * This is designed to be a safeguard to prevent oversubscribing a cluster. + * Specify a value of zero for no limit, which is the default for compatibility reasons. + */ + public static final Setting<Double> DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING = Setting.doubleSetting( + "cluster.filecache.remote_data_ratio", + 0.0, + 0.0, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile double remoteDataRatio; + + public FileCacheSettings(Settings settings, ClusterSettings clusterSettings) { + setRemoteDataRatio(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, this::setRemoteDataRatio); + } + + public void setRemoteDataRatio(double remoteDataRatio) { + this.remoteDataRatio = remoteDataRatio; + } + + public double getRemoteDataRatio() { + return remoteDataRatio; + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index c7f756957076c..67549c86b7dd2 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -91,6 +91,7 @@ public class RemoteFsTranslog extends Translog { private static final int SYNC_PERMIT = 1; private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT); private final AtomicBoolean pauseSync = new AtomicBoolean(false); + private final boolean isTranslogMetadataEnabled; public RemoteFsTranslog( TranslogConfig config, @@ -110,6 +111,7 @@ public RemoteFsTranslog( this.startedPrimarySupplier = startedPrimarySupplier; this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); + isTranslogMetadataEnabled = indexSettings().isTranslogMetadataEnabled(); this.translogTransferManager =
buildTranslogTransferManager( blobStoreRepository, threadPool, @@ -117,7 +119,8 @@ public RemoteFsTranslog( fileTransferTracker, remoteTranslogTransferTracker, indexSettings().getRemoteStorePathStrategy(), - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); try { download(translogTransferManager, location, logger, config.shouldSeedRemote()); @@ -169,7 +172,8 @@ public static void download( RemoteStorePathStrategy pathStrategy, RemoteStoreSettings remoteStoreSettings, Logger logger, - boolean seedRemote + boolean seedRemote, + boolean isTranslogMetadataEnabled ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( Locale.ROOT, @@ -188,7 +192,8 @@ public static void download( fileTransferTracker, remoteTranslogTransferTracker, pathStrategy, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); RemoteFsTranslog.download(translogTransferManager, location, logger, seedRemote); logger.trace(remoteTranslogTransferTracker.toString()); @@ -293,7 +298,8 @@ public static TranslogTransferManager buildTranslogTransferManager( FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker tracker, RemoteStorePathStrategy pathStrategy, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) { assert Objects.nonNull(pathStrategy); String indexUUID = shardId.getIndex().getUUID(); @@ -315,7 +321,16 @@ public static TranslogTransferManager buildTranslogTransferManager( .build(); BlobPath mdPath = pathStrategy.generatePath(mdPathInput); BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); - return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker, remoteStoreSettings); + return new TranslogTransferManager( + shardId, + transferService, + dataPath, + mdPath, + fileTransferTracker, + tracker, + remoteStoreSettings, + isTranslogMetadataEnabled + ); } @Override @@ -592,7 +607,8 @@ public static void cleanup( ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; @@ -607,7 +623,8 @@ public static void cleanup( fileTransferTracker, remoteTranslogTransferTracker, pathStrategy, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index 318e204daba65..71a0556910810 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -8,6 +8,9 @@ package org.opensearch.index.translog.transfer; +import java.io.ByteArrayOutputStream; +import java.util.Base64; +import java.util.HashMap; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -19,7 +22,7 @@ import 
org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeFileInputStream; @@ -41,6 +44,7 @@ import java.util.Set; import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum; /** @@ -140,11 +144,36 @@ public void uploadBlobAsync(InputStream inputStream, Iterable remotePath writePriority, (size, position) -> new OffsetRangeIndexInputStream(input, size, position), expectedChecksum, - listener + listener, + null ); } } + // Builds a metadata map containing the Base64-encoded checkpoint file data associated with a translog file. + static Map buildTransferFileMetadata(InputStream metadataInputStream) throws IOException { + Map metadata = new HashMap<>(); + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + byte[] buffer = new byte[128]; + int bytesRead; + int totalBytesRead = 0; + + while ((bytesRead = metadataInputStream.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + totalBytesRead += bytesRead; + if (totalBytesRead > 1024) { + // We enforce a limit of 1KB on the size of the checkpoint file. + throw new IOException("Input stream exceeds 1KB limit"); + } + } + + byte[] bytes = byteArrayOutputStream.toByteArray(); + String metadataString = Base64.getEncoder().encodeToString(bytes); + metadata.put(CHECKPOINT_FILE_DATA_KEY, metadataString); + } + return metadata; + } + private void uploadBlob( TransferFileSnapshot fileSnapshot, ActionListener listener, @@ -154,6 +183,11 @@ private void uploadBlob( try { ChannelFactory channelFactory = FileChannel::open; + Map metadata = null; + if (fileSnapshot.getMetadataFileInputStream() != null) { + metadata = buildTransferFileMetadata(fileSnapshot.getMetadataFileInputStream()); + } + long contentLength; try (FileChannel channel = channelFactory.open(fileSnapshot.getPath(), StandardOpenOption.READ)) { contentLength = channel.size(); @@ -171,7 +205,8 @@ private void uploadBlob( writePriority, (size, position) -> new OffsetRangeFileInputStream(fileSnapshot.getPath(), size, position), fileSnapshot.getChecksum(), - completionListener + completionListener, + metadata ); } catch (Exception e) { @@ -187,7 +222,7 @@ private void uploadBlob( } - private void asyncBlobUpload(String fileName, String remoteFileName, long contentLength, BlobPath blobPath, WritePriority writePriority, RemoteTransferContainer.OffsetRangeInputStreamSupplier inputStreamSupplier, long expectedChecksum, ActionListener completionListener) throws IOException { + private void asyncBlobUpload(String fileName, String remoteFileName, long contentLength, BlobPath blobPath, WritePriority writePriority, RemoteTransferContainer.OffsetRangeInputStreamSupplier inputStreamSupplier, long expectedChecksum, ActionListener completionListener, Map metadata) throws IOException { BlobContainer blobContainer = blobStore.blobContainer(blobPath); assert blobContainer instanceof AsyncMultiStreamBlobContainer; boolean 
remoteIntegrityEnabled = ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported(); @@ -199,7 +234,8 @@ private void asyncBlobUpload(String fileName, String remoteFileName, long conten writePriority, inputStreamSupplier, expectedChecksum, - remoteIntegrityEnabled + remoteIntegrityEnabled, + metadata )) { ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), completionListener); } @@ -212,7 +248,8 @@ public InputStream downloadBlob(Iterable path, String fileName) throws I @Override @ExperimentalApi - public FetchBlobResult downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + public InputStreamWithMetadata downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + assert blobStore.isBlobMetadataEnabled(); return blobStore.blobContainer((BlobPath) path).readBlobWithMetadata(fileName); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java index dcec94edd694f..86f042af0584b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java @@ -108,6 +108,8 @@ public static class TransferFileSnapshot extends FileSnapshot { private final long primaryTerm; private Long checksum; + @Nullable + private InputStream metadataFileInputStream; public TransferFileSnapshot(Path path, long primaryTerm, Long checksum) throws IOException { super(path); @@ -128,6 +130,14 @@ public long getPrimaryTerm() { return primaryTerm; } + public void setMetadataFileInputStream(InputStream inputStream) { + this.metadataFileInputStream = inputStream; + } + + public InputStream getMetadataFileInputStream() { + return metadataFileInputStream; + } + @Override public int hashCode() { return Objects.hash(primaryTerm, super.hashCode()); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index c97690dfa8590..fceb488f3b604 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -11,7 +11,7 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.core.action.ActionListener; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -132,11 +132,11 @@ void uploadBlobs( * * @param path the remote path from where download should be made * @param fileName the name of the file - * @return {@link FetchBlobResult} of the remote file + * @return {@link InputStreamWithMetadata} of the remote file * @throws IOException the exception while reading the data */ @ExperimentalApi - FetchBlobResult downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; + InputStreamWithMetadata downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; void listAllInSortedOrder(Iterable path, String filenamePrefix, int limit, ActionListener> listener); diff --git 
a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java index ef34fd31a296b..6dcdc8f8cf44a 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java @@ -12,6 +12,7 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; +import java.io.IOException; import java.util.Set; /** @@ -39,4 +40,10 @@ public interface TransferSnapshot { * @return the translog transfer metadata */ TranslogTransferMetadata getTranslogTransferMetadata(); + + /** + * The snapshot of the translog generational files, with each translog file's checkpoint file input stream attached as metadata. + * @return the set of translog file snapshots that carry the checkpoint file input stream as metadata. + */ + Set<TransferFileSnapshot> getTranslogFileSnapshotWithMetadata() throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index fb78731246a07..ae007c0c33e1e 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -64,6 +64,16 @@ public Set<TransferFileSnapshot> getTranslogFileSnapshots() { return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v1).collect(Collectors.toSet()); } + @Override + public Set<TransferFileSnapshot> getTranslogFileSnapshotWithMetadata() throws IOException { + for (Tuple tuple : translogCheckpointFileInfoTupleSet) { + TransferFileSnapshot translogFileSnapshot = tuple.v1(); + TransferFileSnapshot checkpointFileSnapshot = tuple.v2(); + translogFileSnapshot.setMetadataFileInputStream(checkpointFileSnapshot.inputStream()); + } + return getTranslogFileSnapshots(); + } + @Override public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata( diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 47638f44fd6fc..1cc39cdf442e2 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -16,6 +16,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -36,6 +37,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -63,6 +65,9 @@ public class TranslogTransferManager { private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private final RemoteStoreSettings remoteStoreSettings; private static final int METADATA_FILES_TO_FETCH = 10; + // Flag to include checkpoint file data as translog file metadata during upload/download + private
final boolean isTranslogMetadataEnabled; + final static String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; private final Logger logger; @@ -79,7 +84,8 @@ public TranslogTransferManager( BlobPath remoteMetadataTransferPath, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker remoteTranslogTransferTracker, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) { this.shardId = shardId; this.transferService = transferService; @@ -89,6 +95,7 @@ public TranslogTransferManager( this.logger = Loggers.getLogger(getClass(), shardId); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; this.remoteStoreSettings = remoteStoreSettings; + this.isTranslogMetadataEnabled = isTranslogMetadataEnabled; } public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker() { @@ -110,8 +117,12 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis(); try { - toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); - toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); + if (isTranslogMetadataEnabled) { + toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshotWithMetadata())); + } else { + toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); + toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); + } if (toUpload.isEmpty()) { logger.trace("Nothing to upload for transfer"); return true; @@ -236,30 +247,78 @@ public boolean downloadTranslog(String primaryTerm, String generation, Path loca generation, location ); - // Download Checkpoint file from remote to local FS String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation)); - downloadToFS(ckpFileName, location, primaryTerm); - // Download translog file from remote to local FS String translogFilename = Translog.getFilename(Long.parseLong(generation)); - downloadToFS(translogFilename, location, primaryTerm); + if (isTranslogMetadataEnabled == false) { + // Download Checkpoint file, translog file from remote to local FS + downloadToFS(ckpFileName, location, primaryTerm, false); + downloadToFS(translogFilename, location, primaryTerm, false); + } else { + // Download translog.tlog file with object metadata from remote to local FS + Map<String, String> metadata = downloadToFS(translogFilename, location, primaryTerm, true); + try { + assert metadata != null && !metadata.isEmpty() && metadata.containsKey(CHECKPOINT_FILE_DATA_KEY); + recoverCkpFileUsingMetadata(metadata, location, generation, translogFilename); + } catch (Exception e) { + throw new IOException("Failed to recover checkpoint file from remote", e); + } + } return true; } - private void downloadToFS(String fileName, Path location, String primaryTerm) throws IOException { + /** + * Processes the provided metadata and tries to recover the translog.ckp file to the local FS. + */ + private void recoverCkpFileUsingMetadata(Map<String, String> metadata, Path location, String generation, String fileName) + throws IOException { + + String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation)); + Path filePath = location.resolve(ckpFileName); + // Here, we always override the existing file if present.
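Before the recovery method continues below: the upload side Base64-encodes the checkpoint bytes into object metadata (buildTransferFileMetadata, with its 1KB cap) and the download side decodes them back into a translog.ckp file. A self-contained sketch of that round trip, using an in-memory map in place of real blob-store metadata:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

// Round trip: checkpoint bytes -> Base64 string in object metadata -> bytes again.
public final class CheckpointMetadataSketch {
    static final String CHECKPOINT_KEY = "ckp-data"; // same idea as CHECKPOINT_FILE_DATA_KEY
    static final int MAX_CHECKPOINT_BYTES = 1024;    // upload-side size guard

    static Map<String, String> encode(InputStream checkpoint) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buffer = new byte[128];
        int read, total = 0;
        while ((read = checkpoint.read(buffer)) != -1) {
            out.write(buffer, 0, read);
            total += read;
            if (total > MAX_CHECKPOINT_BYTES) {
                throw new IOException("checkpoint exceeds " + MAX_CHECKPOINT_BYTES + " byte limit");
            }
        }
        Map<String, String> metadata = new HashMap<>();
        metadata.put(CHECKPOINT_KEY, Base64.getEncoder().encodeToString(out.toByteArray()));
        return metadata;
    }

    static byte[] decode(Map<String, String> metadata) {
        String encoded = metadata.get(CHECKPOINT_KEY);
        if (encoded == null) {
            throw new IllegalStateException("expected key " + CHECKPOINT_KEY + " in metadata");
        }
        return Base64.getDecoder().decode(encoded);
    }

    public static void main(String[] args) throws IOException {
        byte[] original = "checkpoint-bytes".getBytes(StandardCharsets.UTF_8);
        Map<String, String> metadata = encode(new ByteArrayInputStream(original));
        System.out.println(new String(decode(metadata), StandardCharsets.UTF_8)); // checkpoint-bytes
    }
}
```

Keeping the checkpoint under a hard size cap is what makes it safe to piggyback on object metadata instead of writing a second blob.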
+ deleteFileIfExists(filePath); + + String ckpDataBase64 = metadata.get(CHECKPOINT_FILE_DATA_KEY); + if (ckpDataBase64 == null) { + logger.error("Error processing metadata for translog file: {}", fileName); + throw new IllegalStateException( + "Checkpoint file data key " + CHECKPOINT_FILE_DATA_KEY + " is expected but not found in metadata for file: " + fileName + ); + } + byte[] ckpFileBytes = Base64.getDecoder().decode(ckpDataBase64); + Files.write(filePath, ckpFileBytes); + } + + private Map downloadToFS(String fileName, Path location, String primaryTerm, boolean withMetadata) throws IOException { Path filePath = location.resolve(fileName); // Here, we always override the existing file if present. // We need to change this logic when we introduce incremental download - if (Files.exists(filePath)) { - Files.delete(filePath); - } + deleteFileIfExists(filePath); + Map metadata = null; boolean downloadStatus = false; long bytesToRead = 0, downloadStartTime = System.nanoTime(); - try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { - // Capture number of bytes for stats before reading - bytesToRead = inputStream.available(); - Files.copy(inputStream, filePath); - downloadStatus = true; + try { + if (withMetadata) { + try ( + InputStreamWithMetadata inputStreamWithMetadata = transferService.downloadBlobWithMetadata( + remoteDataTransferPath.add(primaryTerm), + fileName + ) + ) { + InputStream inputStream = inputStreamWithMetadata.getInputStream(); + metadata = inputStreamWithMetadata.getMetadata(); + + bytesToRead = inputStream.available(); + Files.copy(inputStream, filePath); + downloadStatus = true; + } + } else { + try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { + bytesToRead = inputStream.available(); + Files.copy(inputStream, filePath); + downloadStatus = true; + } + } } finally { remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); if (downloadStatus) { @@ -269,6 +328,13 @@ private void downloadToFS(String fileName, Path location, String primaryTerm) th // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync fileTransferTracker.add(fileName, true); + return metadata; + } + + private void deleteFileIfExists(Path filePath) throws IOException { + if (Files.exists(filePath)) { + Files.delete(filePath); + } } public TranslogTransferMetadata readMetadata() throws IOException { @@ -391,7 +457,11 @@ public void deleteGenerationAsync(long primaryTerm, Set generations, Runna // Add .ckp and .tlog file to translog file list which is located in basePath/ String ckpFileName = Translog.getCommitCheckpointFileName(generation); String translogFileName = Translog.getFilename(generation); - translogFiles.addAll(List.of(ckpFileName, translogFileName)); + if (isTranslogMetadataEnabled == false) { + translogFiles.addAll(List.of(ckpFileName, translogFileName)); + } else { + translogFiles.add(translogFileName); + } }); // Delete the translog and checkpoint files asynchronously deleteTranslogFilesAsync(primaryTerm, translogFiles, onCompletion); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 44af83bb839c1..57f7e402536f2 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ 
-715,23 +715,28 @@ private synchronized void cleanCache(double stalenessThreshold) { } // Contains CleanupKey objects with open shard but invalidated readerCacheKeyId. final Set cleanupKeysFromOutdatedReaders = new HashSet<>(); - // Contains CleanupKey objects of a closed shard. + // Contains CleanupKey objects for a full cache cleanup. + final Set> cleanupKeysFromFullClean = new HashSet<>(); + // Contains CleanupKey objects for a closed shard. final Set> cleanupKeysFromClosedShards = new HashSet<>(); for (Iterator iterator = keysToClean.iterator(); iterator.hasNext();) { CleanupKey cleanupKey = iterator.next(); iterator.remove(); - if (cleanupKey.readerCacheKeyId == null || !cleanupKey.entity.isOpen()) { - // null indicates full cleanup, as does a closed shard - IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + final IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + if (cleanupKey.readerCacheKeyId == null) { + // null indicates full cleanup // Add both shardId and indexShardHashCode to uniquely identify an indexShard. + cleanupKeysFromFullClean.add(new Tuple<>(indexShard.shardId(), indexShard.hashCode())); + } else if (!cleanupKey.entity.isOpen()) { + // The shard is closed cleanupKeysFromClosedShards.add(new Tuple<>(indexShard.shardId(), indexShard.hashCode())); } else { cleanupKeysFromOutdatedReaders.add(cleanupKey); } } - if (cleanupKeysFromOutdatedReaders.isEmpty() && cleanupKeysFromClosedShards.isEmpty()) { + if (cleanupKeysFromOutdatedReaders.isEmpty() && cleanupKeysFromFullClean.isEmpty() && cleanupKeysFromClosedShards.isEmpty()) { return; } @@ -740,15 +745,15 @@ private synchronized void cleanCache(double stalenessThreshold) { for (Iterator> iterator = cache.keys().iterator(); iterator.hasNext();) { ICacheKey key = iterator.next(); Key delegatingKey = key.key; - if (cleanupKeysFromClosedShards.contains(new Tuple<>(delegatingKey.shardId, delegatingKey.indexShardHashCode))) { - // Since the shard is closed, the cache should drop stats for this shard. - dimensionListsToDrop.add(key.dimensions); + Tuple shardIdInfo = new Tuple<>(delegatingKey.shardId, delegatingKey.indexShardHashCode); + if (cleanupKeysFromFullClean.contains(shardIdInfo) || cleanupKeysFromClosedShards.contains(shardIdInfo)) { iterator.remove(); } else { CacheEntity cacheEntity = cacheEntityLookup.apply(delegatingKey.shardId).orElse(null); if (cacheEntity == null) { // If cache entity is null, it means that index or shard got deleted/closed meanwhile. // So we will delete this key. + dimensionListsToDrop.add(key.dimensions); iterator.remove(); } else { CleanupKey cleanupKey = new CleanupKey(cacheEntity, delegatingKey.readerCacheKeyId); @@ -757,6 +762,12 @@ private synchronized void cleanCache(double stalenessThreshold) { } } } + + if (cleanupKeysFromClosedShards.contains(shardIdInfo)) { + // Since the shard is closed, the cache should drop stats for this shard. + // This should not happen on a full cache cleanup. 
+ dimensionListsToDrop.add(key.dimensions); + } } for (List<String> closedDimensions : dimensionListsToDrop) { // Invalidate a dummy key containing the dimensions we need to drop stats for diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 0bd4c7aedfc03..074186f64a75d 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -80,6 +80,18 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * This setting is used to disable uploading the translog.ckp file as metadata on translog.tlog. It is effective only for + * repositories that support reading and writing blobs with metadata, and is applicable only to remote store enabled clusters. + */ + @ExperimentalApi + public static final Setting<Boolean> CLUSTER_REMOTE_STORE_TRANSLOG_METADATA = Setting.boolSetting( + "cluster.remote_store.index.translog.translog_metadata", + true, + Property.NodeScope, + Property.Dynamic + ); + /** + * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for + * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} @@ -105,12 +117,25 @@ public class RemoteStoreSettings { Property.NodeScope ); + /** + * Controls the timeout value while uploading segment files to the remote segment store + */ + public static final Setting<TimeValue> CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.segment.transfer_timeout", + TimeValue.timeValueMinutes(30), + TimeValue.timeValueMinutes(10), + Property.NodeScope, + Property.Dynamic + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; private volatile TimeValue clusterRemoteTranslogTransferTimeout; + private volatile TimeValue clusterRemoteSegmentTransferTimeout; private volatile RemoteStoreEnums.PathType pathType; private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; + private volatile boolean isTranslogMetadataEnabled; public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); @@ -134,11 +159,20 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { pathType = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setPathType); + isTranslogMetadataEnabled = clusterSettings.get(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, this::setTranslogMetadataEnabled); + pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm); maxRemoteTranslogReaders = CLUSTER_REMOTE_MAX_TRANSLOG_READERS.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_MAX_TRANSLOG_READERS, this::setMaxRemoteTranslogReaders); + + clusterRemoteSegmentTransferTimeout = CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, +
this::setClusterRemoteSegmentTransferTimeout + ); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -161,10 +195,18 @@ public TimeValue getClusterRemoteTranslogTransferTimeout() { return clusterRemoteTranslogTransferTimeout; } + public TimeValue getClusterRemoteSegmentTransferTimeout() { + return clusterRemoteSegmentTransferTimeout; + } + private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) { this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout; } + private void setClusterRemoteSegmentTransferTimeout(TimeValue clusterRemoteSegmentTransferTimeout) { + this.clusterRemoteSegmentTransferTimeout = clusterRemoteSegmentTransferTimeout; + } + @ExperimentalApi public RemoteStoreEnums.PathType getPathType() { return pathType; @@ -179,6 +221,14 @@ private void setPathType(RemoteStoreEnums.PathType pathType) { this.pathType = pathType; } + private void setTranslogMetadataEnabled(boolean isTranslogMetadataEnabled) { + this.isTranslogMetadataEnabled = isTranslogMetadataEnabled; + } + + public boolean isTranslogMetadataEnabled() { + return isTranslogMetadataEnabled; + } + private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) { this.pathHashAlgorithm = pathHashAlgorithm; } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c0e058e6cc984..c2866247fd723 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -59,6 +59,7 @@ import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -154,6 +155,7 @@ import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.store.remote.filecache.FileCacheFactory; +import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.RemoteStoreSettings; @@ -208,6 +210,7 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SystemIndexPlugin; +import org.opensearch.plugins.TelemetryAwarePlugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; import org.opensearch.ratelimitting.admissioncontrol.transport.AdmissionControlTransportInterceptor; @@ -275,6 +278,7 @@ import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -604,23 +608,24 @@ protected Node( getCustomNameResolvers(pluginsService.filterPlugins(DiscoveryPlugin.class)) ); - List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class); - final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); - clusterService.addStateApplier(scriptService); - resourcesToClose.add(clusterService); - final Set> consistentSettings = settingsModule.getConsistentSettings(); - if (consistentSettings.isEmpty() == false) { - clusterService.addLocalNodeMasterListener( - 
new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() - ); - } - TracerFactory tracerFactory; MetricsRegistryFactory metricsRegistryFactory; if (FeatureFlags.isEnabled(TELEMETRY)) { - final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, settingsModule.getClusterSettings()); if (telemetrySettings.isTracingFeatureEnabled() || telemetrySettings.isMetricsFeatureEnabled()) { List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + List telemetryPluginsImplementingTelemetryAware = telemetryPlugins.stream() + .filter(a -> TelemetryAwarePlugin.class.isAssignableFrom(a.getClass())) + .collect(toList()); + if (telemetryPluginsImplementingTelemetryAware.isEmpty() == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "Telemetry plugins %s should not implement TelemetryAwarePlugin interface", + telemetryPluginsImplementingTelemetryAware + ) + ); + } TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); if (telemetrySettings.isTracingFeatureEnabled()) { tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); @@ -646,6 +651,24 @@ protected Node( resourcesToClose.add(tracer::close); resourcesToClose.add(metricsRegistry::close); + final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); + + List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class); + final ClusterService clusterService = new ClusterService( + settings, + settingsModule.getClusterSettings(), + threadPool, + clusterManagerMetrics + ); + clusterService.addStateApplier(scriptService); + resourcesToClose.add(clusterService); + final Set> consistentSettings = settingsModule.getConsistentSettings(); + if (consistentSettings.isEmpty() == false) { + clusterService.addLocalNodeMasterListener( + new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() + ); + } + final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); final UsageService usageService = new UsageService(); @@ -673,7 +696,8 @@ protected Node( clusterPlugins, clusterInfoService, snapshotsInfoService, - threadPool.getThreadContext() + threadPool.getThreadContext(), + clusterManagerMetrics ); modules.add(clusterModule); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); @@ -879,7 +903,8 @@ protected Node( systemIndices, forbidPrivateIndexSettings, awarenessReplicaBalance, - remoteStoreSettings + remoteStoreSettings, + repositoriesServiceReference::get ); pluginsService.filterPlugins(Plugin.class) .forEach( @@ -913,6 +938,30 @@ protected Node( ) .collect(Collectors.toList()); + Collection telemetryAwarePluginComponents = pluginsService.filterPlugins(TelemetryAwarePlugin.class) + .stream() + .flatMap( + p -> p.createComponents( + client, + clusterService, + threadPool, + resourceWatcherService, + scriptService, + xContentRegistry, + environment, + nodeEnvironment, + namedWriteableRegistry, + clusterModule.getIndexNameExpressionResolver(), + repositoriesServiceReference::get, + tracer, + metricsRegistry + ).stream() + ) + .collect(Collectors.toList()); + + // Add the telemetryAwarePlugin components to the existing pluginComponents collection. 
+ pluginComponents.addAll(telemetryAwarePluginComponents); + // register all standard SearchRequestOperationsCompositeListenerFactory to the SearchRequestOperationsCompositeListenerFactory final SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory = new SearchRequestOperationsCompositeListenerFactory( @@ -1111,7 +1160,8 @@ protected Node( metadataIndexUpgradeService, shardLimitValidator, indicesService, - clusterInfoService::getClusterInfo + clusterInfoService::getClusterInfo, + new FileCacheSettings(settings, clusterService.getClusterSettings())::getRemoteDataRatio ); RemoteStoreRestoreService remoteStoreRestoreService = new RemoteStoreRestoreService( diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index bcd521bd8eef6..9a8bcc419144e 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -30,7 +30,7 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.common.util.FeatureFlags.REMOTE_ROUTING_TABLE_EXPERIMENTAL; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; /** * This is an abstraction for validating and storing information specific to remote backed storage nodes. @@ -50,7 +50,7 @@ public class RemoteStoreNodeAttribute { + "." + CryptoMetadata.SETTINGS_KEY; public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings."; - public static final String REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.routing.repository"; + public static final String REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.routing_table.repository"; private final RepositoriesMetadata repositoriesMetadata; @@ -115,11 +115,11 @@ private Map validateSettingsAttributesNonNull(DiscoveryNode node .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> validateAttributeNonNull(node, key))); -// if (settingsMap.isEmpty()) { -// throw new IllegalStateException( -// "joining node [" + node + "] doesn't have settings attribute for [" + repositoryName + "] repository" -// ); -// } + if (settingsMap.isEmpty()) { + throw new IllegalStateException( + "joining node [" + node + "] doesn't have settings attribute for [" + repositoryName + "] repository" + ); + } return settingsMap; } @@ -163,7 +163,7 @@ private Set getValidatedRepositoryNames(DiscoveryNode node) { } else if (node.getAttributes().containsKey(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); } - if (node.getAttributes().containsKey(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)){ + if (node.getAttributes().containsKey(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)); } @@ -197,17 +197,13 @@ public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { && isRemoteClusterStateAttributePresent(settings); } - public static boolean isRemoteRoutingTableAttributePresent(Settings settings) { + private static boolean 
isRemoteRoutingTableAttributePresent(Settings settings) { return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) .isEmpty() == false; } public static boolean isRemoteRoutingTableEnabled(Settings settings) { - assert FeatureFlags.isEnabled(REMOTE_ROUTING_TABLE_EXPERIMENTAL) == true; - assert RemoteRoutingTableService.REMOTE_ROUTING_TABLE_ENABLED_SETTING.get(settings) == true; - assert isRemoteRoutingTableAttributePresent(settings) == true; - return FeatureFlags.isEnabled(REMOTE_ROUTING_TABLE_EXPERIMENTAL) && RemoteRoutingTableService.REMOTE_ROUTING_TABLE_ENABLED_SETTING.get(settings) - && isRemoteRoutingTableAttributePresent(settings); + return FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && isRemoteRoutingTableAttributePresent(settings); } public RepositoriesMetadata getRepositoriesMetadata() { @@ -254,6 +250,21 @@ public int hashCode() { return hashCode; } + /** + * Checks if two instances are equal, with an option to skip the check for a provided list of repositories. + * + * @param o other instance + * @param reposToSkip list of repos to skip check for equality + * @return {@code true} iff both instances are equal, ignoring the repositories in either instance that are part of reposToSkip. + */ + public boolean equalsWithRepoSkip(Object o, List<String> reposToSkip) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; + return this.getRepositoriesMetadata().equalsIgnoreGenerationsWithRepoSkip(that.getRepositoriesMetadata(), reposToSkip); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index a6eefd2f4fd17..f08c9c738f1b4 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.util.SPIClassIterator; import org.opensearch.Build; @@ -762,6 +763,7 @@ static void reloadLuceneSPI(ClassLoader loader) { // Codecs: PostingsFormat.reloadPostingsFormats(loader); DocValuesFormat.reloadDocValuesFormats(loader); + KnnVectorsFormat.reloadKnnVectorsFormat(loader); Codec.reloadCodecs(loader); } diff --git a/server/src/main/java/org/opensearch/plugins/TelemetryAwarePlugin.java b/server/src/main/java/org/opensearch/plugins/TelemetryAwarePlugin.java new file mode 100644 index 0000000000000..42cab326f88bf --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/TelemetryAwarePlugin.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
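The equalsWithRepoSkip contract above compares two nodes' repository metadata while ignoring a caller-supplied skip list (for example, the routing-table repository). A toy sketch of that shape, with a record standing in for the real RepositoryMetadata:

```java
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

// Sketch of "equality except for a skip list": compare two repository lists
// after filtering out the repositories whose names the caller wants ignored.
public final class EqualsWithSkipSketch {
    record Repo(String name, String type) {} // illustrative stand-in

    static boolean equalsWithRepoSkip(List<Repo> a, List<Repo> b, List<String> reposToSkip) {
        List<Repo> filteredA = a.stream().filter(r -> !reposToSkip.contains(r.name())).collect(Collectors.toList());
        List<Repo> filteredB = b.stream().filter(r -> !reposToSkip.contains(r.name())).collect(Collectors.toList());
        return Objects.equals(filteredA, filteredB);
    }

    public static void main(String[] args) {
        List<Repo> left = List.of(new Repo("segments", "s3"), new Repo("routing_table", "s3"));
        List<Repo> right = List.of(new Repo("segments", "s3"));
        System.out.println(equalsWithRepoSkip(left, right, List.of("routing_table"))); // true
        System.out.println(equalsWithRepoSkip(left, right, List.of()));                // false
    }
}
```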
+ */ + +package org.opensearch.plugins; + +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.Collections; +import java.util.function.Supplier; + +/** + * Plugin that provides the telemetry registries used to build components with telemetry, and also provides a way to + * pass the telemetry registries to implementing plugins for adding instrumentation in the code. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TelemetryAwarePlugin { + + /** + * Returns components added by this plugin. + *
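+ * <p>Editorial usage sketch (added for this write-up, not part of the original patch; {@code MyTelemetryPlugin}
+ * and {@code MyInstrumentedService} are hypothetical names):
+ * <pre>{@code
+ * public class MyTelemetryPlugin extends Plugin implements TelemetryAwarePlugin {
+ *     @Override
+ *     public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
+ *         ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry,
+ *         Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry,
+ *         IndexNameExpressionResolver indexNameExpressionResolver, Supplier<RepositoriesService> repositoriesServiceSupplier,
+ *         Tracer tracer, MetricsRegistry metricsRegistry) {
+ *         // Hand the tracer and metrics registry to a component that emits spans and metrics.
+ *         return Collections.singletonList(new MyInstrumentedService(tracer, metricsRegistry));
+ *     }
+ * }
+ * }</pre>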

+ * Any components returned that implement {@link LifecycleComponent} will have their lifecycle managed. + * Note: To aid in the migration away from Guice, all objects returned as components will be bound in Guice + * to themselves. + * + * @param client A client to make requests to the system + * @param clusterService A service to allow watching and updating cluster state + * @param threadPool A service to allow retrieving an executor to run an async action + * @param resourceWatcherService A service to watch for changes to node local files + * @param scriptService A service to allow running scripts on the local node + * @param xContentRegistry the registry for extensible xContent parsing + * @param environment the environment for path and setting configurations + * @param nodeEnvironment the node environment used to coordinate access to the data paths + * @param namedWriteableRegistry the registry for {@link NamedWriteable} object parsing + * @param indexNameExpressionResolver A service that resolves expressions to index and alias names + * @param repositoriesServiceSupplier A supplier for the service that manages snapshot repositories; will return null when this method + * is called, but will return the repositories service once the node is initialized. + * @param tracer the tracer to add tracing instrumentation. + * @param metricsRegistry the registry for metrics instrumentation. + */ + default Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier, + Tracer tracer, + MetricsRegistry metricsRegistry + ) { + return Collections.emptyList(); + } +} diff --git a/server/src/main/java/org/opensearch/script/Script.java b/server/src/main/java/org/opensearch/script/Script.java index 9e74314c281cd..f18bd992cb00d 100644 --- a/server/src/main/java/org/opensearch/script/Script.java +++ b/server/src/main/java/org/opensearch/script/Script.java @@ -589,7 +589,7 @@ public Script(StreamInput in) throws IOException { @SuppressWarnings("unchecked") Map options = (Map) (Map) in.readMap(); this.options = options; - this.params = in.readMap(); + this.params = Collections.unmodifiableMap(in.readMap()); } @Override diff --git a/server/src/main/java/org/opensearch/script/UpdateScript.java b/server/src/main/java/org/opensearch/script/UpdateScript.java index 86697e9ae550e..f6355fe24817b 100644 --- a/server/src/main/java/org/opensearch/script/UpdateScript.java +++ b/server/src/main/java/org/opensearch/script/UpdateScript.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import java.util.Collections; import java.util.Map; /** @@ -53,7 +54,7 @@ public abstract class UpdateScript { private final Map ctx; public UpdateScript(Map params, Map ctx) { - this.params = params; + this.params = Collections.unmodifiableMap(params); this.ctx = ctx; } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 07248a0719c3a..6c22567d8cf0d 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -1004,6 +1004,37 @@ public
SearchSourceBuilder derivedField(String name, String type, Script script) return this; } + /** + * Adds a derived field with the given name, using the provided type, script, and other parameters + * @param name name of the derived field + * @param type type of the derived field + * @param script script associated with the derived field + * @param properties map of field names to field types for nested fields within an object derived field + * @param sourceIndexedField source text field which is indexed to filter documents for better performance + * @param format date format + * @param ignoreMalformed ignores malformed fields instead of failing the search request + */ + public SearchSourceBuilder derivedField( + String name, + String type, + Script script, + Map properties, + String sourceIndexedField, + String format, + Boolean ignoreMalformed + ) { + if (derivedFields == null) { + derivedFields = new ArrayList<>(); + } + DerivedField derivedField = new DerivedField(name, type, script); + derivedField.setProperties(properties); + derivedField.setSourceIndexedField(sourceIndexedField); + derivedField.setFormat(format); + derivedField.setIgnoreMalformed(ignoreMalformed); + derivedFields.add(derivedField); + return this; + } + /** * Sets the boost a specific index or alias will receive when the query is executed against it. diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index 69f86bb91cc6e..6ae90b0ef8434 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -304,7 +304,7 @@ private static BoundaryScanner getBoundaryScanner(Field field) { return DEFAULT_WORD_BOUNDARY_SCANNER; case CHARS: if (fieldOptions.boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN - || fieldOptions.boundaryChars() != SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) { + || fieldOptions.boundaryChars() != HighlightBuilder.DEFAULT_BOUNDARY_CHARS) { return new SimpleBoundaryScanner(fieldOptions.boundaryMaxScan(), fieldOptions.boundaryChars()); } return DEFAULT_SIMPLE_BOUNDARY_SCANNER; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java index 0e7c3cf30ccec..44ef3a90395b8 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -111,6 +111,8 @@ public class HighlightBuilder extends AbstractHighlighterBuilder" }; + static final Character[] DEFAULT_BOUNDARY_CHARS = HighlightBuilder.convertCharArray(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS); + /** * a {@link FieldOptions} with default settings */ @@ -124,7 +126,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilder dataToFileCacheSizeRatioSupplier; + private static final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor = new CleanRestoreStateTaskExecutor(); public RestoreService( @@ -214,7 +215,8 @@ public RestoreService( MetadataIndexUpgradeService metadataIndexUpgradeService, ShardLimitValidator shardLimitValidator, indicesService, - Supplier clusterInfoSupplier + Supplier clusterInfoSupplier, + Supplier dataToFileCacheSizeRatioSupplier ) {
this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -228,6 +230,7 @@ public RestoreService( this.shardLimitValidator = shardLimitValidator; this.indicesService = indicesService; this.clusterInfoSupplier = clusterInfoSupplier; + this.dataToFileCacheSizeRatioSupplier = dataToFileCacheSizeRatioSupplier; // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. restoreSnapshotTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.RESTORE_SNAPSHOT_KEY, true); @@ -399,9 +402,7 @@ public ClusterState execute(ClusterState currentState) { if (isRemoteSnapshot) { snapshotIndexMetadata = addSnapshotToIndexSettings(snapshotIndexMetadata, snapshot, snapshotIndexId); } - final boolean isSearchableSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match( - snapshotIndexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) - ); + final boolean isSearchableSnapshot = snapshotIndexMetadata.isRemoteSnapshot(); final boolean isRemoteStoreShallowCopy = Boolean.TRUE.equals( snapshotInfo.isRemoteStoreIndexShallowCopyEnabled() ) && metadata.index(index).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false); @@ -472,7 +473,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - createIndexService.addRemoteStorePathStrategyInCustomData(indexMdBuilder, false); + createIndexService.addRemoteStoreCustomMetadata(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), @@ -855,7 +856,7 @@ private IndexMetadata updateIndexSettings( private void validateSearchableSnapshotRestorable(long totalRestorableRemoteIndexesSize) { ClusterInfo clusterInfo = clusterInfoSupplier.get(); - double remoteDataToFileCacheRatio = DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(clusterService.getSettings()); + final double remoteDataToFileCacheRatio = dataToFileCacheSizeRatioSupplier.get(); Map nodeFileCacheStats = clusterInfo.getNodeFileCacheStats(); if (nodeFileCacheStats.isEmpty() || remoteDataToFileCacheRatio <= 0.01f) { return; @@ -869,7 +870,7 @@ private void validateSearchableSnapshotRestorable(long totalRestorableRemoteInde .sum(); Predicate isRemoteSnapshotShard = shardRouting -> shardRouting.primary() - && indicesService.indexService(shardRouting.index()).getIndexSettings().isRemoteSnapshot(); + && clusterService.state().getMetadata().getIndexSafe(shardRouting.index()).isRemoteSnapshot(); ShardsIterator shardsIterator = clusterService.state() .routingTable() diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 50ffd7322544a..3d6a54055d3d5 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -740,7 +740,8 @@ public void testRolloverClusterState() throws Exception { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -879,7 +880,8 @@ public void 
testRolloverClusterStateForDataStream() throws Exception { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -1058,7 +1060,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { new SystemIndices(emptyMap()), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index cf7080ab2fc06..ff9e41ee7c784 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -55,6 +55,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -153,7 +154,11 @@ private void indicesThatCannotBeCreatedTestCase( null, new IndexingPressureService( Settings.EMPTY, - new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) + ClusterServiceUtils.createClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null + ) ), null, new SystemIndices(emptyMap()), diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index da9156ccdb71a..a94a5d60b3f5a 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -71,6 +71,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -170,7 +171,11 @@ class TestTransportBulkAction extends TransportBulkAction { ), new IndexingPressureService( SETTINGS, - new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) + ClusterServiceUtils.createClusterService( + SETTINGS, + new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null + ) ), null, new SystemIndices(emptyMap()), diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java index f009988ffae17..91a2552ac3f04 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java @@ -45,6 +45,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -90,7 +91,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { LoggerContext context = (LoggerContext) LogManager.getContext(false); SearchPhaseContext searchPhaseContext1 = new MockSearchPhaseContext(1); - ClusterService clusterService1 = new ClusterService( + ClusterService clusterService1 = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null @@ -99,7 +100,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { int numberOfLoggersBefore = context.getLoggers().size(); SearchPhaseContext searchPhaseContext2 = new MockSearchPhaseContext(1); - ClusterService clusterService2 = new ClusterService( + ClusterService clusterService2 = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null @@ -124,7 +125,7 @@ public void testOnRequestEnd() throws InterruptedException { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "0ms"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService, logger); final List searchListenersList = new ArrayList<>(List.of(searchRequestSlowLog)); @@ -157,7 +158,7 @@ public void testConcurrentOnRequestEnd() throws InterruptedException { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "-1"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService, logger); final List searchListenersList = new ArrayList<>(List.of(searchRequestSlowLog)); @@ -321,7 +322,7 @@ public void testLevelSettingWarn() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(level, searchRequestSlowLog.getLevel()); } @@ -332,7 +333,7 @@ public void testLevelSettingDebug() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); 
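[Editorial aside] The slow-log tests above and below configure warn/info/debug/trace thresholds and then assert on the resulting level. The decision they exercise is, at heart, a threshold comparison; a minimal hedged sketch (illustrative names, not the actual `SearchRequestSlowLog` internals), assuming nanosecond thresholds where `-1` disables a level:

```java
// Sketch: choose the most severe slow-log level whose threshold the
// request latency meets; a threshold of -1 disables that level.
final class SlowLogLevelSketch {
    enum Level { WARN, INFO, DEBUG, TRACE, NONE }

    static Level levelFor(long tookNanos, long warn, long info, long debug, long trace) {
        if (warn >= 0 && tookNanos >= warn) return Level.WARN;
        if (info >= 0 && tookNanos >= info) return Level.INFO;
        if (debug >= 0 && tookNanos >= debug) return Level.DEBUG;
        if (trace >= 0 && tookNanos >= trace) return Level.TRACE;
        return Level.NONE;
    }
}
```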
Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(level, searchRequestSlowLog.getLevel().toString()); } @@ -343,7 +344,7 @@ public void testLevelSettingFail() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); try { new SearchRequestSlowLog(clusterService); @@ -363,7 +364,7 @@ public void testSetThresholds() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "100ms"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(TimeValue.timeValueMillis(400).nanos(), searchRequestSlowLog.getWarnThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), searchRequestSlowLog.getInfoThreshold()); @@ -380,7 +381,7 @@ public void testSetThresholdsUnits() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "100nanos"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(TimeValue.timeValueSeconds(400).nanos(), searchRequestSlowLog.getWarnThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), searchRequestSlowLog.getInfoThreshold()); @@ -395,7 +396,7 @@ public void testSetThresholdsDefaults() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "200ms"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(TimeValue.timeValueMillis(400).nanos(), searchRequestSlowLog.getWarnThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), searchRequestSlowLog.getInfoThreshold()); @@ -409,7 +410,7 @@ public void testSetThresholdsError() { 
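[Editorial aside] The unit-handling assertions above hinge on OpenSearch's `TimeValue` parsing, which is also what makes the "NOT A TIME VALUE" case below throw. A hedged illustration (the setting name is made up):

```java
import org.opensearch.common.unit.TimeValue;

public class TimeValueParsingSketch {
    public static void main(String[] args) {
        // "400s" resolves to the same nanosecond count the assertions compare against;
        // a malformed string such as "NOT A TIME VALUE" throws IllegalArgumentException.
        TimeValue warn = TimeValue.parseTimeValue("400s", "example.threshold.warn");
        System.out.println(warn.nanos() == TimeValue.timeValueSeconds(400).nanos()); // true
    }
}
```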
settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "NOT A TIME VALUE"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); try { new SearchRequestSlowLog(clusterService); diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index 557e4dc2ca8c5..ae35d37fe77b2 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -72,6 +72,8 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.gateway.TestShardBatchGatewayAllocator; @@ -92,7 +94,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void setUp() throws Exception { super.setUp(); threadContext = new ThreadContext(Settings.EMPTY); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null @@ -167,7 +169,7 @@ public void testRegisterAllocationDeciderDuplicate() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new EnableAllocationDecider(settings, clusterSettings)); } - }), clusterInfoService, null, threadContext) + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)) ); assertEquals(e.getMessage(), "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice"); } @@ -178,7 +180,7 @@ public void testRegisterAllocationDecider() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new FakeAllocationDecider()); } - }), clusterInfoService, null, threadContext); + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); } @@ -188,7 +190,7 @@ private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, Str public Map> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonMap(name, supplier); } - }), clusterInfoService, null, threadContext); + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); } public void testRegisterShardsAllocator() { @@ -209,7 +211,15 @@ public void testUnknownShardsAllocator() { Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new ClusterModule(settings, clusterService, Collections.emptyList(), clusterInfoService, null, threadContext) + () -> new ClusterModule( + settings, + clusterService, + 
Collections.emptyList(), + clusterInfoService, + null, + threadContext, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ) ); assertEquals("Unknown ShardsAllocator [dne]", e.getMessage()); } @@ -295,7 +305,8 @@ public void testRejectsReservedExistingShardsAllocatorName() { Collections.singletonList(existingShardsAllocatorPlugin(GatewayAllocator.ALLOCATOR_NAME)), clusterInfoService, null, - threadContext + threadContext, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); expectThrows( IllegalArgumentException.class, @@ -310,7 +321,8 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { Arrays.asList(existingShardsAllocatorPlugin("duplicate"), existingShardsAllocatorPlugin("duplicate")), clusterInfoService, null, - threadContext + threadContext, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 47dbf85c13b1f..537b2d13ec08a 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -54,6 +54,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; @@ -83,7 +84,13 @@ public void testScheduling() { final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final ThreadPool threadPool = deterministicTaskQueue.getThreadPool(); - final ClusterApplierService clusterApplierService = new ClusterApplierService("test", settings, clusterSettings, threadPool) { + final ClusterApplierService clusterApplierService = new ClusterApplierService( + "test", + settings, + clusterSettings, + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { return new MockSinglePrioritizingExecutor("mock-executor", deterministicTaskQueue, threadPool); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 3e343e95f6c4b..9cb1bd0b57132 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -72,6 +72,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; @@ -944,6 +945,145 @@ public void testNodeJoinInMixedMode() { JoinTaskExecutor.ensureNodesCompatibility(joiningNode2, currentNodes, metadata); } + public void testRemoteRoutingTableRepoAbsentNodeJoin() { + + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinRepoPresentInJoiningNode() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinRepoPresentInExistingNode() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + } + + public void testRemoteRoutingTableNodeJoinRepoPresentInBothNode() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifference() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, 
TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final DiscoveryNode existingNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode2).add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifferenceMixedMode() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final DiscoveryNode existingNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode2).add(existingNode).localNodeId(existingNode.getId()).build()) + .metadata(metadata) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) throws Exception { @@ -985,6 +1125,7 @@ private DiscoveryNode newDiscoveryNode(Map attributes) { private static final String TRANSLOG_REPO = "translog-repo"; private static final String CLUSTER_STATE_REPO = "cluster-state-repo"; private static final String COMMON_REPO = "remote-repo"; + private static final String ROUTING_TABLE_REPO = "routing-table-repo"; private Map remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName) { return remoteStoreNodeAttributes(segmentRepoName, translogRepoName, CLUSTER_STATE_REPO); @@ -1049,6 +1190,28 @@ private Map remoteStateNodeAttributes(String clusterStateRepo) { }; } + private Map remoteRoutingTableAttributes(String repoName) { + String routingTableRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + repoName + ); + String routingTableRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repoName + ); + + return new HashMap<>() { + { + 
put(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName); + putIfAbsent(routingTableRepositoryTypeAttributeKey, "s3"); + putIfAbsent(routingTableRepositorySettingsAttributeKeyPrefix + "bucket", "state_bucket"); + putIfAbsent(routingTableRepositorySettingsAttributeKeyPrefix + "base_path", "/state/path"); + } + }; + } + private void validateAttributes(Map remoteStoreNodeAttributes, ClusterState currentState, DiscoveryNode existingNode) { DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); Exception e = assertThrows( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index e1fffb3a49163..32cb153c32029 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -33,6 +33,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; @@ -61,6 +62,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; @@ -179,7 +181,8 @@ private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterStat ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); AtomicReference clusterStateRef = new AtomicReference<>(initialState); clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index fad98a6609c3b..0072649e4ca72 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -56,6 +56,7 @@ import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -87,6 +88,8 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; @@ -116,6 +119,7 @@ import 
java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -156,6 +160,7 @@ import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.hamcrest.Matchers.containsString; @@ -176,6 +181,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { private CreateIndexClusterStateUpdateRequest request; private QueryShardContext queryShardContext; private ClusterSettings clusterSettings; + private IndicesService indicesServices; + private RepositoriesService repositoriesService; + private Supplier repositoriesServiceSupplier; private static final String segmentRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() @@ -188,6 +196,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { public void setup() throws Exception { super.setUp(); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + indicesServices = mock(IndicesService.class); + repositoriesServiceSupplier = mock(Supplier.class); + repositoriesService = mock(RepositoriesService.class); } @Before @@ -694,7 +705,7 @@ public void testValidateIndexName() throws Exception { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -705,7 +716,8 @@ public void testValidateIndexName() throws Exception { new SystemIndices(Collections.emptyMap()), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); validateIndexName( checkerService, @@ -781,7 +793,7 @@ public void testValidateDotIndex() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -792,7 +804,8 @@ public void testValidateDotIndex() { new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); // Check deprecations assertFalse(checkerService.validateDotIndex(".test2", false)); @@ -1207,7 +1220,7 @@ public void testvalidateIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( settings, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), 
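[Editorial aside] Across these hunks the tests stop passing `null` for the `IndicesService` argument and start threading a `Supplier<RepositoriesService>` into `MetadataCreateIndexService`. A hedged sketch of the Mockito wiring that the `testRemoteCustomData` hunk below sets up (local names are illustrative; the stubbed calls mirror ones visible in this diff):

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.function.Supplier;

import org.opensearch.common.blobstore.BlobStore;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.repositories.blobstore.BlobStoreRepository;

public class RepositoriesSupplierMockSketch {
    @SuppressWarnings("unchecked")
    static Supplier<RepositoriesService> wire(String translogRepoName) {
        Supplier<RepositoriesService> supplier = mock(Supplier.class);
        RepositoriesService repositoriesService = mock(RepositoriesService.class);
        BlobStoreRepository repository = mock(BlobStoreRepository.class);
        BlobStore blobStore = mock(BlobStore.class);

        // supplier -> service -> repository -> blob store
        when(supplier.get()).thenReturn(repositoriesService);
        when(repositoriesService.repository(translogRepoName)).thenReturn(repository);
        when(repository.blobStore()).thenReturn(blobStore);
        when(blobStore.isBlobMetadataEnabled()).thenReturn(true);
        return supplier;
    }
}
```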
@@ -1218,7 +1231,8 @@ public void testvalidateIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); @@ -1327,7 +1341,7 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { final MetadataCreateIndexService checkerService = new MetadataCreateIndexService( forceClusterSettingEnabled, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1338,7 +1352,8 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); // Use DOCUMENT replication type setting for index creation final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); @@ -1453,7 +1468,7 @@ public void testRemoteStoreDisabledByUserIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1464,7 +1479,8 @@ public void testRemoteStoreDisabledByUserIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1488,7 +1504,7 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1499,7 +1515,8 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1528,7 +1545,7 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1539,7 +1556,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1746,10 +1764,16 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType RemoteStoreSettings 
remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); ThreadPool threadPool = new TestThreadPool(getTestName()); + BlobStoreRepository repositoryMock = mock(BlobStoreRepository.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + when(repositoriesService.repository(getRemoteStoreTranslogRepo(settings))).thenReturn(repositoryMock); + BlobStore blobStoreMock = mock(BlobStore.class); + when(repositoryMock.blobStore()).thenReturn(blobStoreMock); + when(blobStoreMock.isBlobMetadataEnabled()).thenReturn(randomBoolean()); MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( settings, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1760,7 +1784,8 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), - remoteStoreSettings + remoteStoreSettings, + repositoriesServiceSupplier ); CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() @@ -1873,7 +1898,7 @@ public void testIndexLifecycleNameSetting() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1884,7 +1909,8 @@ public void testIndexLifecycleNameSetting() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 0b99ffac67ee8..6643d6e13289b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -57,8 +57,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexTemplateMissingException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; @@ -2039,10 +2041,13 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr when(clusterService.state()).thenReturn(clusterState); when(clusterService.getSettings()).thenReturn(settings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + IndicesService indicesServices = mock(IndicesService.class); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + // when(indicesServices.getRepositoriesServiceSupplier()).thenReturn(() -> repositoriesService); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + 
indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false), @@ -2053,7 +2058,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java index 64d9c243304d8..cce75105dd33f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java @@ -33,6 +33,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterInfo; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.EmptyClusterInfoService; @@ -56,6 +57,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -77,6 +81,12 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.mockito.ArgumentMatchers.anyDouble; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class AllocationServiceTests extends OpenSearchTestCase { @@ -137,6 +147,16 @@ public void testAssignsPrimariesInPriorityOrderThenReplicas() { .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + final Histogram rerouteHistogram = mock(Histogram.class); + final Histogram mockedHistogram = mock(Histogram.class); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("reroute.latency")) { + return rerouteHistogram; + } + return mockedHistogram; + }); final AllocationService allocationService = new AllocationService( new AllocationDeciders( Arrays.asList( @@ -158,7 +178,8 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } }, new EmptyClusterInfoService(), - EmptySnapshotsInfoService.INSTANCE + EmptySnapshotsInfoService.INSTANCE, + new ClusterManagerMetrics(metricsRegistry) ); final String unrealisticAllocatorName = "unrealistic"; @@ -258,10 +279,18 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing 
assertThat(routingTable3.index("mediumPriority").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4)); assertTrue(routingTable3.index("lowPriority").allPrimaryShardsActive()); assertThat(routingTable3.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty()); + + verify(rerouteHistogram, times(3)).record(anyDouble()); } public void testExplainsNonAllocationOfShardWithUnknownAllocator() { - final AllocationService allocationService = new AllocationService(null, null, null, null); + final AllocationService allocationService = new AllocationService( + null, + null, + null, + null, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); allocationService.setExistingShardsAllocators( Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, new TestGatewayAllocator()) ); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index bde8a45359814..652633e689b93 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -69,7 +69,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.EmptySnapshotsInfoService; @@ -95,6 +94,7 @@ import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -381,6 +381,7 @@ public void testFileCacheRemoteShardsDecisions() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") + .put(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .build(); // We have an index with 2 primary shards each taking 40 bytes. 
Each node has 100 bytes available @@ -406,7 +407,6 @@ public void testFileCacheRemoteShardsDecisions() { DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() .put(IndexMetadata.builder("test").settings(remoteIndexSettings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) - .persistentSettings(Settings.builder().put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5).build()) .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java index 3931fd5844c47..131d1a4759ba2 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java @@ -37,7 +37,8 @@ import java.util.function.Supplier; import static org.mockito.Mockito.*; -import static org.opensearch.common.util.FeatureFlags.REMOTE_ROUTING_TABLE_EXPERIMENTAL; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; public class RemoteRoutingTableServiceTests extends OpenSearchTestCase { @@ -65,7 +66,7 @@ public void setup() { blobStoreRepository = mock(BlobStoreRepository.class); when(repositoriesService.repository("routing_repository")).thenReturn(blobStoreRepository); - Settings nodeSettings = Settings.builder().put(REMOTE_ROUTING_TABLE_EXPERIMENTAL, "true").build(); + Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); FeatureFlags.initializeFeatureFlags(nodeSettings); remoteRoutingTableService = new RemoteRoutingTableService( diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index c5ed505e6bbf2..3cbdfb80067d7 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; @@ -51,6 +52,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -74,15 +77,30 @@ import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyDouble; +import static 
org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; public class ClusterApplierServiceTests extends OpenSearchTestCase { private static ThreadPool threadPool; private TimedClusterApplierService clusterApplierService; + private static MetricsRegistry metricsRegistry; + private static Histogram appliersLatencyHistogram; + private static Histogram listenersLatencyHistogram; @BeforeClass public static void createThreadPool() { threadPool = new TestThreadPool(ClusterApplierServiceTests.class.getName()); + metricsRegistry = mock(MetricsRegistry.class); + appliersLatencyHistogram = mock(Histogram.class); + listenersLatencyHistogram = mock(Histogram.class); } @AfterClass @@ -96,6 +114,13 @@ public static void stopThreadPool() { @Before public void setUp() throws Exception { super.setUp(); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("appliers.latency")) { + return appliersLatencyHistogram; + } + return listenersLatencyHistogram; + }); clusterApplierService = createTimedClusterService(true); } @@ -110,7 +135,8 @@ private TimedClusterApplierService createTimedClusterService(boolean makeCluster TimedClusterApplierService timedClusterApplierService = new TimedClusterApplierService( Settings.builder().put("cluster.name", "ClusterApplierServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(metricsRegistry) ); timedClusterApplierService.setNodeConnectionsService(createNoOpNodeConnectionsService()); timedClusterApplierService.setInitialState( @@ -194,6 +220,8 @@ public void onFailure(String source, Exception e) { }); assertBusy(mockAppender::assertAllExpectationsMatched); } + verifyNoInteractions(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } @TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") @@ -291,6 +319,8 @@ public void onFailure(String source, Exception e) { latch.await(); mockAppender.assertAllExpectationsMatched(); } + verifyNoInteractions(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } public void testLocalNodeClusterManagerListenerCallbacks() { @@ -329,6 +359,10 @@ public void offClusterManager() { setState(timedClusterApplierService, state); assertThat(isClusterManager.get(), is(true)); + verify(listenersLatencyHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(listenersLatencyHistogram); + verifyNoInteractions(appliersLatencyHistogram); + timedClusterApplierService.close(); } @@ -366,6 +400,10 @@ public void offMaster() { setState(timedClusterApplierService, state); assertThat(isClusterManager.get(), is(false)); + verify(listenersLatencyHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(listenersLatencyHistogram); + verifyNoInteractions(appliersLatencyHistogram); + timedClusterApplierService.close(); } @@ -405,6 +443,10 @@ public void onFailure(String source, Exception e) { latch.await(); assertNull(error.get()); assertTrue(applierCalled.get()); + +
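The stubbing pattern set up above, and repeated later in MasterServiceTests, routes MetricsRegistry.createHistogram() to a dedicated mock per metric name so that each latency histogram can be asserted on in isolation. A minimal, self-contained sketch of the same technique, assuming the OpenSearch test classpath and Mockito; the class and method names here are illustrative only, not part of this change set:

import org.opensearch.telemetry.metrics.Histogram;
import org.opensearch.telemetry.metrics.MetricsRegistry;

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class HistogramRoutingSketch {
    // Builds a registry whose createHistogram(name, description, unit) returns
    // `target` for metric names containing `nameFragment` and `fallback` for
    // everything else, so a test can verify exactly which histogram recorded.
    static MetricsRegistry routingRegistry(String nameFragment, Histogram target, Histogram fallback) {
        MetricsRegistry registry = mock(MetricsRegistry.class);
        when(registry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocation -> {
            String metricName = invocation.getArgument(0);
            return metricName.contains(nameFragment) ? target : fallback;
        });
        return registry;
    }
}

Wrapping the mocked registry in new ClusterManagerMetrics(registry) keeps the code under test unaware of the seam: it records into whatever Histogram the registry handed out, and the test verifies the right one.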
verify(appliersLatencyHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } public void testClusterStateApplierBubblesUpExceptionsInApplier() throws InterruptedException { @@ -435,6 +477,9 @@ public void onFailure(String source, Exception e) { latch.await(); assertNotNull(error.get()); assertThat(error.get().getMessage(), containsString("dummy exception")); + + verifyNoInteractions(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } public void testClusterStateApplierBubblesUpExceptionsInSettingsApplier() throws InterruptedException { @@ -478,6 +523,9 @@ public void onFailure(String source, Exception e) { latch.await(); assertNotNull(error.get()); assertThat(error.get().getMessage(), containsString("illegal value can't update")); + + verifyNoInteractions(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } public void testClusterStateApplierSwallowsExceptionInListener() throws InterruptedException { @@ -509,6 +557,9 @@ public void onFailure(String source, Exception e) { latch.await(); assertNull(error.get()); assertTrue(applierCalled.get()); + + verifyNoInteractions(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } public void testClusterStateApplierCanCreateAnObserver() throws InterruptedException { @@ -565,6 +616,10 @@ public void onFailure(String source, Exception e) { latch.await(); assertNull(error.get()); assertTrue(applierCalled.get()); + + verify(appliersLatencyHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } public void testThreadContext() throws InterruptedException { @@ -609,6 +664,9 @@ public void onFailure(String source, Exception e) { } latch.await(); + + verifyNoInteractions(appliersLatencyHistogram); + verifyNoInteractions(listenersLatencyHistogram); } static class TimedClusterApplierService extends ClusterApplierService { @@ -617,8 +675,13 @@ static class TimedClusterApplierService extends ClusterApplierService { volatile Long currentTimeOverride = null; boolean applicationMayFail; - TimedClusterApplierService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - super("test_node", settings, clusterSettings, threadPool); + TimedClusterApplierService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { + super("test_node", settings, clusterSettings, threadPool, clusterManagerMetrics); this.clusterSettings = clusterSettings; } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java index 4d88683826af7..bd12b09d2b983 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java @@ -10,6 +10,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.junit.After; @@ -26,11 +27,11 @@ public void terminateThreadPool() { public void testDeprecatedGetMasterServiceBWC() { try ( - ClusterService clusterService = new ClusterService( + ClusterService clusterService = 
ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool - ) + ); ) { MasterService masterService = clusterService.getMasterService(); ClusterManagerService clusterManagerService = clusterService.getClusterManagerService(); diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 85f6c129944fa..0ff8d9dc4e7a5 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -40,6 +40,7 @@ import org.opensearch.Version; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -61,6 +62,9 @@ import org.opensearch.common.util.concurrent.BaseFuture; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -95,6 +99,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyDouble; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class MasterServiceTests extends OpenSearchTestCase { @@ -125,6 +136,10 @@ public void randomizeCurrentTime() { } private ClusterManagerService createClusterManagerService(boolean makeClusterManager) { + return createClusterManagerService(makeClusterManager, NoopMetricsRegistry.INSTANCE); + } + + private ClusterManagerService createClusterManagerService(boolean makeClusterManager, MetricsRegistry metricsRegistry) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() @@ -132,7 +147,8 @@ private ClusterManagerService createClusterManagerService(boolean makeClusterMan .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(metricsRegistry) ); final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes( @@ -154,7 +170,18 @@ private ClusterManagerService createClusterManagerService(boolean makeClusterMan } public void testClusterManagerAwareExecution() throws Exception { - final ClusterManagerService nonClusterManager = createClusterManagerService(false); + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + final Histogram clusterStateComputeHistogram = mock(Histogram.class); + final Histogram 
clusterStatePublishHistogram = mock(Histogram.class); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("cluster.state.new.compute.latency")) { + return clusterStateComputeHistogram; + } + return clusterStatePublishHistogram; + }); + + final ClusterManagerService nonClusterManager = createClusterManagerService(false, metricsRegistry); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); @@ -194,6 +221,8 @@ public void onFailure(String source, Exception e) { assertFalse("non-cluster-manager cluster state update task was not executed", taskFailed[0]); nonClusterManager.close(); + + verify(clusterStateComputeHistogram, times(1)).record(anyDouble(), any()); } public void testThreadContext() throws InterruptedException { @@ -1070,7 +1099,8 @@ public void testLongClusterStateUpdateLogging() throws Exception { .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ) ) { @@ -1246,6 +1276,18 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + final Histogram clusterStateComputeHistogram = mock(Histogram.class); + final Histogram clusterStatePublishHistogram = mock(Histogram.class); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("cluster.state.new.compute.latency")) { + return clusterStateComputeHistogram; + } + return clusterStatePublishHistogram; + }); + try ( ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() @@ -1253,7 +1295,8 @@ public void testAcking() throws InterruptedException { .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(metricsRegistry) ) ) { @@ -1372,6 +1415,9 @@ public void onAckTimeout() { latch.await(); } } + + verify(clusterStateComputeHistogram, times(2)).record(anyDouble(), any()); + verify(clusterStatePublishHistogram, times(1)).record(anyDouble()); } public void testDeprecatedMasterServiceUpdateTaskThreadName() { diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index 1c43bb565ef69..dd2fb51151a5b 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -55,6 +55,7 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.repositories.IndexId; +import org.opensearch.test.ClusterServiceUtils; import 
org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; @@ -123,7 +124,7 @@ public String getValue(final String value) { }) ); - final ClusterService clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null); + final ClusterService clusterService = ClusterServiceUtils.createClusterService(Settings.EMPTY, clusterSettings, null); final Metadata.Builder builder = Metadata.builder(); final Settings settings = Settings.builder().put("foo.old", randomAlphaOfLength(8)).build(); applySettingsToBuilder.accept(builder, settings); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index bb59a5792ec8c..aa31c710c1fbd 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -18,14 +18,21 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.SnapshotShardSizeInfo; import org.opensearch.test.gateway.TestShardBatchGatewayAllocator; @@ -222,6 +229,21 @@ public void testSafelyRemoveShardFromBothBatch() { assertEquals(0, testShardsBatchGatewayAllocator.getBatchIdToStoreShardBatch().size()); } + public void testDeDuplicationOfReplicaShardsAcrossBatch() { + final ShardId shardId = new ShardId("test", "_na_", 0); + final DiscoveryNode node = newNode("node1"); + // number of replicas is greater than batch size - to ensure shardRouting gets de-duped across batch + createRoutingWithDifferentUnAssignedInfo(shardId, node, 50); + testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(10); + + // only replica shard should be in the batch + Set<String> replicaBatches = testShardsBatchGatewayAllocator.createAndUpdateBatches(testAllocation, false); + assertEquals(1, replicaBatches.size()); + ShardsBatchGatewayAllocator.ShardsBatch shardsBatch = testShardsBatchGatewayAllocator.getBatchIdToStoreShardBatch() + .get(replicaBatches.iterator().next()); + assertEquals(1, shardsBatch.getBatchedShards().size()); + } + public void testGetBatchIdExisting() { createIndexAndUpdateClusterState(2, 1020, 1); // get all shardsRoutings for test index @@ -345,6 +367,59 @@ private void createIndexAndUpdateClusterState(int count, int numberOfShards, int ); } + private void createRoutingWithDifferentUnAssignedInfo(ShardId primaryShardId, DiscoveryNode node, int numberOfReplicas) { + + ShardRouting primaryShard = TestShardRouting.newShardRouting(primaryShardId, node.getId(), true, 
ShardRoutingState.STARTED); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(primaryShardId.getIndexName()) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(numberOfReplicas) + .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())) + ) + .build(); + + IndexRoutingTable.Builder isd = IndexRoutingTable.builder(primaryShardId.getIndex()) + .addIndexShard(new IndexShardRoutingTable.Builder(primaryShardId).addShard(primaryShard).build()); + + for (int i = 0; i < numberOfReplicas; i++) { + isd.addShard( + ShardRouting.newUnassigned( + primaryShardId, + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo( + UnassignedInfo.Reason.REPLICA_ADDED, + "message for replica-copy " + i, + null, + 0, + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.emptySet() + ) + ) + ); + } + + RoutingTable routingTable = RoutingTable.builder().add(isd).build(); + clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + testAllocation = new RoutingAllocation( + new AllocationDeciders(Collections.emptyList()), + new RoutingNodes(clusterState, false), + clusterState, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + + } + // call this after index creation and update cluster state private Tuple<Set<String>, Set<String>> createBatchesAndAssert(int expectedBatchSize) { Set<String> primaryBatches = testShardsBatchGatewayAllocator.createAndUpdateBatches(testAllocation, true); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index c448c4b07e03b..59fb7df5428e2 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -53,6 +53,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; import org.hamcrest.Matchers; @@ -68,7 +69,7 @@ public class GatewayServiceTests extends OpenSearchTestCase { private GatewayService createService(final Settings.Builder settings) { - final ClusterService clusterService = new ClusterService( + final ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.builder().put("cluster.name", "GatewayServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 387fbf3c84e34..696906d25fdbb 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -17,6 +17,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.metadata.TemplatesMetadata; import 
org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; @@ -57,6 +58,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.TestCustomMetadata; @@ -92,9 +94,10 @@ import org.mockito.ArgumentMatchers; import static java.util.stream.Collectors.toList; -import static org.opensearch.common.util.FeatureFlags.REMOTE_ROUTING_TABLE_EXPERIMENTAL; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.RemoteStateTransferException; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; @@ -103,6 +106,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -114,7 +118,6 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.*; public class RemoteClusterStateServiceTests extends OpenSearchTestCase { @@ -1286,7 +1289,7 @@ public void testRemoteStateStats() throws IOException { } public void testRemoteRoutingTableNotInitializedWhenDisabled() { - assertNull(remoteClusterStateService.getRemoteRoutingTableService()); + assertNull(remoteClusterStateService.getRemoteRoutingTableService()); } public void testRemoteRoutingTableInitializedWhenEnabled() { @@ -1298,11 +1301,11 @@ public void testRemoteRoutingTableInitializedWhenEnabled() { .build(); clusterSettings.applySettings(newSettings); - Settings nodeSettings = Settings.builder().put(REMOTE_ROUTING_TABLE_EXPERIMENTAL, "true").build(); + Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); FeatureFlags.initializeFeatureFlags(nodeSettings); remoteClusterStateService = new RemoteClusterStateService( - "test-node-id", + "test-node-id", repositoriesServiceSupplier, newSettings, clusterService, @@ -1651,21 +1654,22 @@ private void mockBlobContainerForGlobalMetadata( .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> getFileNameFromPath(entry.getValue().getUploadedFilename()))); - for (Map.Entry entry : customFileMap.entrySet()) { - String custom = entry.getKey(); - String fileName = entry.getValue(); - 
when(blobContainer.readBlob(RemoteCustomMetadata.CUSTOM_METADATA_FORMAT.blobName(fileName))).thenAnswer( - (invocation) -> { - BytesReference bytesReference = RemoteCustomMetadata.CUSTOM_METADATA_FORMAT.serialize( - metadata.custom(custom), - fileName, - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); - return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); - } - ); - } + for (Map.Entry<String, String> entry : customFileMap.entrySet()) { + String custom = entry.getKey(); + String fileName = entry.getValue(); + when(blobContainer.readBlob(RemoteCustomMetadata.CUSTOM_METADATA_FORMAT.blobName(fileName))).thenAnswer( + (invocation) -> { + BytesReference bytesReference = RemoteCustomMetadata.CUSTOM_METADATA_FORMAT.serialize( + metadata.custom(custom), + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); + } + ); + } } else if (codecVersion == ClusterMetadataManifest.CODEC_V1) { String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); when(blobContainer.readBlob(RemoteGlobalMetadata.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))) diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTest.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTest.java index 24d13a3b7a7fd..280a53e0dc2c4 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTest.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTest.java @@ -93,15 +93,6 @@ public void tearDown() throws Exception { threadPool.shutdown(); } - public void testGet() { - ClusterBlocks clusterBlocks = getClusterBlocks(); - RemoteClusterBlocks remoteObjectForUpload = new RemoteClusterBlocks(clusterBlocks, VERSION, clusterUUID, compressor, namedXContentRegistry); - assertThat(remoteObjectForUpload.get(), is(clusterBlocks)); - - RemoteIndexMetadata remoteObjectForDownload = new RemoteIndexMetadata(TEST_BLOB_NAME, clusterUUID, compressor, namedXContentRegistry); - assertThat(remoteObjectForDownload.get(), nullValue()); - } - public void testClusterUUID() { ClusterBlocks clusterBlocks = getClusterBlocks(); RemoteClusterBlocks remoteObjectForUpload = new RemoteClusterBlocks(clusterBlocks, VERSION, clusterUUID, compressor, namedXContentRegistry); diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java index f50d90e0f6cf3..cd852731be21b 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java @@ -95,15 +95,6 @@ public void tearDown() throws Exception { threadPool.shutdown(); } - public void testGet() { - ClusterMetadataManifest manifest = getClusterMetadataManifest(); - RemoteClusterMetadataManifest remoteObjectForUpload = new RemoteClusterMetadataManifest(manifest, clusterUUID, compressor, namedXContentRegistry); - assertThat(remoteObjectForUpload.get(), is(manifest)); - - RemoteClusterMetadataManifest remoteObjectForDownload = new RemoteClusterMetadataManifest(TEST_BLOB_NAME, clusterUUID, compressor, namedXContentRegistry); - assertThat(remoteObjectForDownload.get(), nullValue()); - } - public void 
testClusterUUID() { ClusterMetadataManifest manifest = getClusterMetadataManifest(); RemoteClusterMetadataManifest remoteObjectForUpload = new RemoteClusterMetadataManifest(manifest, clusterUUID, compressor, namedXContentRegistry); diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java index db0e9d9f41e88..9c16c093b99b5 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java @@ -91,15 +91,6 @@ public void tearDown() throws Exception { threadPool.shutdown(); } - public void testGet() { - IndexMetadata indexMetadata = getIndexMetadata(); - RemoteIndexMetadata remoteObjectForUpload = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry); - assertThat(remoteObjectForUpload.get(), is(indexMetadata)); - - RemoteIndexMetadata remoteObjectForDownload = new RemoteIndexMetadata(TEST_BLOB_NAME, clusterUUID, compressor, namedXContentRegistry); - assertThat(remoteObjectForDownload.get(), nullValue()); - } - public void testClusterUUID() { IndexMetadata indexMetadata = getIndexMetadata(); RemoteIndexMetadata remoteObjectForUpload = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry); diff --git a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java index 0b657c1c9745f..8db7c58bf9503 100644 --- a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java @@ -24,6 +24,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -44,7 +45,7 @@ public class IndexingPressureServiceTests extends OpenSearchTestCase { @Before public void beforeTest() { clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - clusterService = new ClusterService(settings, clusterSettings, null); + clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); } public void testCoordinatingOperationForShardIndexingPressure() { diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index ce719a18898f8..c5ad1370ac75a 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -19,6 +19,7 @@ import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matcher; import org.hamcrest.MatcherAssert; @@ -42,7 +43,7 @@ public class ShardIndexingPressureConcurrentExecutionTests extends OpenSearchTes .build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - private final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + private final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); public enum OperationType { COORDINATING, diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java index 023063c7d6e03..31ecad7c8d701 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java @@ -8,11 +8,11 @@ package org.opensearch.index; -import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.TimeUnit; @@ -27,7 +27,7 @@ public class ShardIndexingPressureMemoryManagerTests extends OpenSearchTestCase .build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private final ShardIndexingPressureSettings shardIndexingPressureSettings = new ShardIndexingPressureSettings( - new ClusterService(settings, clusterSettings, null), + ClusterServiceUtils.createClusterService(settings, clusterSettings, null), settings, IndexingPressure.MAX_INDEXING_BYTES.get(settings).getBytes() ); diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java index c555d8f9c489d..5e84a76b2250a 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java @@ -11,6 +11,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; public class ShardIndexingPressureSettingsTests extends OpenSearchTestCase { @@ -24,7 +25,7 @@ public class ShardIndexingPressureSettingsTests extends OpenSearchTestCase { .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); public void testFromSettings() { ShardIndexingPressureSettings shardIndexingPressureSettings = new ShardIndexingPressureSettings( diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java index 46f9801035ac3..d97eec4cc001d 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java @@ -8,10 +8,10 @@ package org.opensearch.index; -import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import 
org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -22,7 +22,7 @@ public class ShardIndexingPressureStoreTests extends OpenSearchTestCase { private final Settings settings = Settings.builder().put(ShardIndexingPressureStore.MAX_COLD_STORE_SIZE.getKey(), 200).build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private final ShardIndexingPressureSettings shardIndexingPressureSettings = new ShardIndexingPressureSettings( - new ClusterService(settings, clusterSettings, null), + ClusterServiceUtils.createClusterService(settings, clusterSettings, null), settings, IndexingPressure.MAX_INDEXING_BYTES.get(settings).getBytes() ); diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java index e7600b1d4c41a..ddc3592511de4 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java @@ -17,6 +17,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; public class ShardIndexingPressureTests extends OpenSearchTestCase { @@ -30,7 +31,7 @@ public class ShardIndexingPressureTests extends OpenSearchTestCase { .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); public void testMemoryBytesMarkedAndReleased() { ShardIndexingPressure shardIndexingPressure = new ShardIndexingPressure(settings, clusterService); diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 54a562642d4ab..08a24a74fdc89 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -1872,7 +1872,7 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc try ( Store store = createStore(); InternalEngine engine = createEngine( - config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get) + config(indexSettings, store, createTempDir(), newMergePolicy(random(), false), null, null, globalCheckpoint::get) ) ) { int numDocs = scaledRandomIntBetween(10, 100); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 2aa310ae959d9..98bcaa3a1a46b 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -208,7 +208,7 @@ public void testChangeLocale() throws IOException { fieldMapping(b -> b.field("type", "date").field("format", "E, d MMM yyyy HH:mm:ss Z").field("locale", "de")) ); - mapper.parse(source(b -> 
b.field("field", "Mi, 06 Dez 2000 02:55:00 -0800"))); + mapper.parse(source(b -> b.field("field", "Mi., 06 Dez. 2000 02:55:00 -0800"))); } public void testNullValue() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldTypeInferenceTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldTypeInferenceTests.java new file mode 100644 index 0000000000000..807d0e96ce5e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/FieldTypeInferenceTests.java @@ -0,0 +1,210 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.store.Directory; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.core.index.Index; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.when; + +public class FieldTypeInferenceTests extends MapperServiceTestCase { + + private static final Map> documentMap; + static { + List listWithNull = new ArrayList<>(); + listWithNull.add(null); + documentMap = new HashMap<>(); + documentMap.put("text_field", List.of("The quick brown fox jumps over the lazy dog.")); + documentMap.put("int_field", List.of(789)); + documentMap.put("float_field", List.of(123.45)); + documentMap.put("date_field_1", List.of("2024-05-12T15:45:00Z")); + documentMap.put("date_field_2", List.of("2024-05-12")); + documentMap.put("boolean_field", List.of(true)); + documentMap.put("null_field", listWithNull); + documentMap.put("array_field_int", List.of(100, 200, 300, 400, 500)); + documentMap.put("array_field_text", List.of("100", "200")); + documentMap.put("object_type", List.of(Map.of("foo", Map.of("bar", 10)))); + } + + public void testJsonSupportedTypes() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + int totalDocs = 10000; + int docsPerLeafCount = 1000; + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + Document d = new Document(); + for (int i = 0; i < totalDocs; i++) { + iw.addDocument(d); + if ((i + 1) % docsPerLeafCount == 0) { + iw.commit(); + } + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + String[] fieldName = { "text_field" }; + Mapper mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("text", mapper.typeName()); + + fieldName[0] = "int_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("long", mapper.typeName()); + + fieldName[0] = "float_field"; + mapper = typeInference.infer(lookup -> 
documentMap.get(fieldName[0])); + assertEquals("float", mapper.typeName()); + + fieldName[0] = "date_field_1"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("date", mapper.typeName()); + + fieldName[0] = "date_field_2"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("date", mapper.typeName()); + + fieldName[0] = "boolean_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("boolean", mapper.typeName()); + + fieldName[0] = "array_field_int"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("long", mapper.typeName()); + + fieldName[0] = "array_field_text"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("text", mapper.typeName()); + + fieldName[0] = "object_type"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("object", mapper.typeName()); + + fieldName[0] = "null_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertNull(mapper); + + // If field is missing ensure that sample docIDs generated for inference are ordered and are in bounds + fieldName[0] = "missing_field"; + List<List<Integer>> docsEvaluated = new ArrayList<>(); + int[] totalDocsEvaluated = { 0 }; + typeInference.setSampleSize(50); + mapper = typeInference.infer(new ValueFetcher() { + @Override + public List<Object> fetchValues(SourceLookup lookup) throws IOException { + docsEvaluated.get(docsEvaluated.size() - 1).add(lookup.docId()); + totalDocsEvaluated[0]++; + return documentMap.get(fieldName[0]); + } + + @Override + public void setNextReader(LeafReaderContext leafReaderContext) { + docsEvaluated.add(new ArrayList<>()); + } + }); + assertNull(mapper); + assertEquals(typeInference.getSampleSize(), totalDocsEvaluated[0]); + for (List<Integer> docsPerLeaf : docsEvaluated) { + for (int j = 0; j < docsPerLeaf.size() - 1; j++) { + assertTrue(docsPerLeaf.get(j) < docsPerLeaf.get(j + 1)); + } + if (!docsPerLeaf.isEmpty()) { + assertTrue(docsPerLeaf.get(0) >= 0 && docsPerLeaf.get(docsPerLeaf.size() - 1) < docsPerLeafCount); + } + } + } + } + } + + public void testDeleteAllDocs() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + int totalDocs = 10000; + int docsPerLeafCount = 1000; + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + Document d = new Document(); + for (int i = 0; i < totalDocs; i++) { + iw.addDocument(d); + if ((i + 1) % docsPerLeafCount == 0) { + iw.commit(); + } + } + iw.deleteAll(); + iw.commit(); + + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + String[] fieldName = { "text_field" }; + Mapper mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertNull(mapper); + } + } + } + + public void testZeroDoc() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + try (Directory dir = newDirectory()) { + IndexWriter iw = new 
IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + String[] fieldName = { "text_field" }; + Mapper mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertNull(mapper); + } + } + } + + public void testSampleGeneration() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + int totalDocs = 10000; + int docsPerLeafCount = 1000; + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + Document d = new Document(); + for (int i = 0; i < totalDocs; i++) { + iw.addDocument(d); + if ((i + 1) % docsPerLeafCount == 0) { + iw.commit(); + } + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + typeInference.setSampleSize(1000 - 1); + typeInference.infer(lookup -> documentMap.get("unknown_field")); + assertThrows(IllegalArgumentException.class, () -> typeInference.setSampleSize(1000 + 1)); + typeInference.setSampleSize(1000); + typeInference.infer(lookup -> documentMap.get("unknown_field")); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index adcfc9d7b17fc..b9c8866e69cbb 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -52,6 +52,7 @@ import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.indices.IndicesModule; import org.opensearch.indices.InvalidTypeNameException; import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.opensearch.plugins.AnalysisPlugin; @@ -90,6 +91,11 @@ public void testTypeValidation() { MapperService.validateTypeName("_doc"); // no exception } + public void testGetMetadataFieldsReturnsExpectedSet() throws Throwable { + final MapperService mapperService = createIndex("test1").mapperService(); + assertEquals(mapperService.getMetadataFields(), IndicesModule.getBuiltInMetadataFields()); + } + public void testPreflightUpdateDoesNotChangeMapping() throws Throwable { final MapperService mapperService = createIndex("test1").mapperService(); final CompressedXContent mapping = createMappingSpecifyingNumberOfFields(1); diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 29efd64e5c751..f72bd76913c8f 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -311,6 +311,43 @@ public void testInlineLeafInnerHitsNestedQuery() throws Exception { assertThat(innerHitBuilders.get(leafInnerHits.getName()), Matchers.notNullValue()); } + public void 
testParentFilterFromInlineLeafInnerHitsNestedQuery() throws Exception { + QueryShardContext queryShardContext = createShardContext(); + SearchContext searchContext = mock(SearchContext.class); + when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + + MapperService mapperService = mock(MapperService.class); + IndexSettings settings = new IndexSettings(newIndexMeta("index", Settings.EMPTY), Settings.EMPTY); + when(mapperService.getIndexSettings()).thenReturn(settings); + when(searchContext.mapperService()).thenReturn(mapperService); + + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); + // Set null for values not related to this test case + leafInnerHits.setScriptFields(null); + leafInnerHits.setHighlightBuilder(null); + leafInnerHits.setSorts(null); + + QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilder()); + when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> { + QueryShardContext context = invoke.getArgument(0); + if (context.getParentFilter() == null) { + throw new Exception("Expect parent filter to be non-null"); + } + return invoke.callRealMethod(); + }); + NestedQueryBuilder query = new NestedQueryBuilder("nested1", innerQueryBuilder, ScoreMode.None); + query.innerHit(leafInnerHits); + final Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>(); + final InnerHitsContext innerHitsContext = new InnerHitsContext(); + query.extractInnerHitBuilders(innerHitBuilders); + assertThat(innerHitBuilders.size(), Matchers.equalTo(1)); + assertTrue(innerHitBuilders.containsKey(leafInnerHits.getName())); + assertNull(queryShardContext.getParentFilter()); + innerHitBuilders.get(leafInnerHits.getName()).build(searchContext, innerHitsContext); + assertNull(queryShardContext.getParentFilter()); + verify(innerQueryBuilder).toQuery(queryShardContext); + } + public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() { InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).innerHit( diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java index d8220c93e4eeb..a2cd3f6b803e0 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java @@ -163,7 +163,7 @@ public void testMaybeUpdateRemoteStorePathStrategyExecutes() { .build(), logger ); - migrationIndexMetadataUpdater.maybeUpdateRemoteStorePathStrategy(builder, indexName); + migrationIndexMetadataUpdater.maybeUpdateRemoteStoreCustomMetadata(builder, indexName); assertCustomPathMetadataIsPresent(builder.build()); } @@ -186,7 +186,7 @@ public void testMaybeUpdateRemoteStorePathStrategyDoesNotExecute() { logger ); - migrationIndexMetadataUpdater.maybeUpdateRemoteStorePathStrategy(builder, indexName); + migrationIndexMetadataUpdater.maybeUpdateRemoteStoreCustomMetadata(builder, indexName); assertCustomPathMetadataIsPresent(builder.build()); } @@ -298,7 +298,14 @@ public static Metadata createIndexMetadataWithRemoteStoreSettings(String indexNa ) .putCustom( REMOTE_STORE_CUSTOM_KEY, - Map.of(RemoteStoreEnums.PathType.NAME, "dummy", RemoteStoreEnums.PathHashAlgorithm.NAME, "dummy") + Map.of( + RemoteStoreEnums.PathType.NAME, + "dummy", + RemoteStoreEnums.PathHashAlgorithm.NAME, + "dummy", + 
IndexMetadata.TRANSLOG_METADATA_KEY, + "dummy" + ) ) .build(); return Metadata.builder().put(indexMetadata).build(); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index ccdd1fe4ab609..280598c516c3c 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -15,6 +15,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.DirectoryFileTransferTracker; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -42,7 +43,7 @@ public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java new file mode 100644 index 0000000000000..7e702ad3773e8 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteStoreCustomMetadataResolverTests extends OpenSearchTestCase { + + RepositoriesService repositoriesService = mock(RepositoriesService.class); + Settings settings = Settings.EMPTY; + + public void testGetPathStrategyMinVersionOlder() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_13_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testGetPathStrategyMinVersionNewer() { + PathType pathType = randomFrom(PathType.values()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(pathType, resolver.getPathStrategy().getType()); + if (pathType.requiresHashAlgorithm()) { + assertNotNull(resolver.getPathStrategy().getHashAlgorithm()); + } else { + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + } + } + + public void testGetPathStrategyStrategy() { + // FIXED type + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + + // FIXED type with hash algorithm + settings = 
Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testGetPathStrategyStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new 
ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testTranslogMetadataAllowedTrueWithMinVersionNewer() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), true).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + BlobStoreRepository repositoryMock = mock(BlobStoreRepository.class); + when(repositoriesService.repository(getRemoteStoreTranslogRepo(settings))).thenReturn(repositoryMock); + BlobStore blobStoreMock = mock(BlobStore.class); + when(repositoryMock.blobStore()).thenReturn(blobStoreMock); + when(blobStoreMock.isBlobMetadataEnabled()).thenReturn(true); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.CURRENT, + () -> repositoriesService, + settings + ); + assertTrue(resolver.isTranslogMetadataEnabled()); + } + + public void testTranslogMetadataAllowedFalseWithMinVersionNewer() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), false).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); 
+ RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.CURRENT, + () -> repositoriesService, + settings + ); + assertFalse(resolver.isTranslogMetadataEnabled()); + } + + public void testTranslogMetadataAllowedMinVersionOlder() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertFalse(resolver.isTranslogMetadataEnabled()); + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java deleted file mode 100644 index de61c902bf13e..0000000000000 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.Version; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.test.OpenSearchTestCase; - -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; - -public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { - - public void testGetMinVersionOlder() { - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - } - - public void testGetMinVersionNewer() { - PathType pathType = randomFrom(PathType.values()); - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(pathType, resolver.get().getType()); - if (pathType.requiresHashAlgorithm()) { - assertNotNull(resolver.get().getHashAlgorithm()); - } else { - assertNull(resolver.get().getHashAlgorithm()); - } - } - - 
public void testGetStrategy() { - // FIXED type - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // FIXED type with hash algorithm - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - 
assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } - - public void testGetStrategyWithDynamicUpdate() { - - // Default value - Settings settings = Settings.builder().build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } -} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index 9d00cf9f2be46..18d18f2dc30b1 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -14,6 +14,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -45,7 +46,7 @@ public class RemoteStorePressureServiceTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), 
threadPool diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java index 064c6c10eba02..7c1ef0de91887 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java @@ -11,6 +11,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -27,7 +28,7 @@ public class RemoteStorePressureSettingsTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java index c300f316ac633..2bc4792a9c31c 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -39,7 +40,11 @@ public void setUp() throws Exception { RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE ) .build(); - clusterService = new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); + clusterService = ClusterServiceUtils.createClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); } @@ -85,7 +90,11 @@ public void testInvalidMovingAverageWindowSize() { "Failed to parse value", IllegalArgumentException.class, () -> new RemoteStoreStatsTrackerFactory( - new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + ClusterServiceUtils.createClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ), settings ) ); @@ -107,7 +116,11 @@ public void testUpdateAfterGetConfiguredSettings() { public void testGetDefaultSettings() { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory( - new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + ClusterServiceUtils.createClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ), Settings.EMPTY ); // Check moving average window size updated diff --git 
a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index c1fc0cdaa0d3b..59c3d3dccdd0f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,9 +8,13 @@ package org.opensearch.index.remote; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; @@ -26,7 +30,9 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.REMOTE_STORE_CUSTOM_KEY; import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; +import static org.opensearch.index.remote.RemoteStoreUtils.determineTranslogMetadataEnabled; import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; @@ -40,6 +46,7 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { private static Map<Character, Integer> BASE64_CHARSET_IDX_MAP; + private static String index = "test-index"; static { Map<Character, Integer> charToIndexMap = new HashMap<>(); @@ -341,4 +348,54 @@ static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { String binaryString = base64PartBinary + encodedValue.substring(1); return new BigInteger(binaryString, 2).longValue(); } + + public void testDetermineTranslogMetadataEnabledWhenTrue() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 1); + IndexMetadata indexMetadata = metadata.index(index); + assertTrue(determineTranslogMetadataEnabled(indexMetadata)); + } + + public void testDetermineTranslogMetadataEnabledWhenFalse() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 0); + IndexMetadata indexMetadata = metadata.index(index); + assertFalse(determineTranslogMetadataEnabled(indexMetadata)); + } + + public void testDetermineTranslogMetadataEnabledWhenKeyNotFound() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 2); + IndexMetadata indexMetadata = metadata.index(index); + assertThrows(AssertionError.class, () -> determineTranslogMetadataEnabled(indexMetadata)); + } + + private static Metadata createIndexMetadataWithRemoteStoreSettings(String indexName, int option) { + IndexMetadata.Builder indexMetadata = IndexMetadata.builder(indexName); + indexMetadata.settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "dummy-tlog-repo") + .put(IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "dummy-segment-repo") + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), "SEGMENT") +
.build() + ).putCustom(REMOTE_STORE_CUSTOM_KEY, getCustomDataMap(option)).build(); + return Metadata.builder().put(indexMetadata).build(); + } + + private static Map getCustomDataMap(int option) { + if (option > 1) { + return Map.of(); + } + String value = (option == 1) ? "true" : "false"; + return Map.of( + RemoteStoreEnums.PathType.NAME, + "dummy", + RemoteStoreEnums.PathHashAlgorithm.NAME, + "dummy", + IndexMetadata.TRANSLOG_METADATA_KEY, + value + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 33f6c67b94b3d..bb0776e0ced25 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -23,6 +23,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.InternalEngineFactory; @@ -34,9 +35,11 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -82,7 +85,7 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { indexShard.refresh("test"); } - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool @@ -90,7 +93,12 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); - remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard, SegmentReplicationCheckpointPublisher.EMPTY, tracker); + remoteStoreRefreshListener = new RemoteStoreRefreshListener( + indexShard, + SegmentReplicationCheckpointPublisher.EMPTY, + tracker, + DefaultRemoteStoreSettings.INSTANCE + ); } private void indexDocs(int startDocId, int numberOfDocs) throws IOException { @@ -175,7 +183,12 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Since the thrown IOException is caught in the constructor, ctor should be invoked successfully. 
- new RemoteStoreRefreshListener(shard, SegmentReplicationCheckpointPublisher.EMPTY, mock(RemoteSegmentTransferTracker.class)); + new RemoteStoreRefreshListener( + shard, + SegmentReplicationCheckpointPublisher.EMPTY, + mock(RemoteSegmentTransferTracker.class), + DefaultRemoteStoreSettings.INSTANCE + ); // Validate that the metadata file stream of remoteMetadataDirectory has been opened only once and that // listFilesByPrefixInLexicographicOrder has been called twice. @@ -370,6 +383,33 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { assertNoLagAndTotalUploadsFailed(segmentTracker, 1); } + public void testSegmentUploadTimeout() throws Exception { + // This covers the case where segment upload fails due to timeout + int succeedOnAttempt = 1; + // We spy on IndexShard.isPrimaryStarted() to validate that the remote upload was retried as expected. + CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); + CountDownLatch successLatch = new CountDownLatch(2); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch, + 1, + new CountDownLatch(0), + true, + true + ); + assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); + assertBusy(() -> assertEquals(1, successLatch.getCount())); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); + assertBusy(() -> { + assertTrue(segmentTracker.getTotalUploadsFailed() > 1); + assertTrue(segmentTracker.getTotalUploadsSucceeded() < 2); + }); + // shutdown threadpool to avoid leaking threads + indexShard.getThreadPool().shutdownNow(); + } + /** * Tests retry flow after snapshot and metadata files have been uploaded to remote store in the failed attempt. * Snapshot and metadata files created in the failed attempt should not break the retry.
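The new testSegmentUploadTimeout above hinges on one pattern: the mocked copyFrom completes its ActionListener on a background thread long after the configured transfer timeout, so the refresh listener gives up waiting and records the attempt as failed. A minimal, self-contained sketch of that pattern using plain java.util.concurrent types rather than the OpenSearch test doubles (all names here are illustrative):

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the upload-timeout pattern exercised by testSegmentUploadTimeout:
// the "upload" completes on a background thread well past the timeout, while the
// caller waits only as long as the configured limit and counts the attempt failed.
public class UploadTimeoutSketch {
    public static void main(String[] args) throws InterruptedException {
        long timeoutMillis = 10;    // stands in for getClusterRemoteSegmentTransferTimeout()
        long uploadMillis = 30_000; // the mocked copyFrom sleeps far past the timeout

        CountDownLatch uploadDone = new CountDownLatch(1);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> {
            try {
                Thread.sleep(uploadMillis); // simulated slow segment transfer
            } catch (InterruptedException ignored) {
                // interrupted by shutdownNow(), mirroring the test's threadpool cleanup
            }
            uploadDone.countDown();
        });

        // The caller gives up after the timeout and records a failed upload, which is
        // what the tracker assertions (getTotalUploadsFailed() > 1) observe per retry.
        boolean finishedInTime = uploadDone.await(timeoutMillis, TimeUnit.MILLISECONDS);
        System.out.println(finishedInTime ? "upload succeeded" : "upload timed out -> counted as failed");

        executor.shutdownNow(); // avoid leaking the sleeping thread
    }
}
```

Because every retry hits the same expired wait, the failure counter keeps climbing while the success counter stays below two, exactly what the assertBusy block checks.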
@@ -469,6 +509,7 @@ public void testRefreshFailedDueToPrimaryTermMisMatch() throws Exception { successLatch, checkpointPublishSucceedOnAttempt, reachedCheckpointPublishLatch, + false, false ); @@ -520,7 +561,8 @@ private Tuple mockIn successLatch, succeedCheckpointPublishOnAttempt, reachedCheckpointPublishLatch, - true + true, + false ); } @@ -530,7 +572,8 @@ private Tuple mockIn CountDownLatch successLatch, int succeedCheckpointPublishOnAttempt, CountDownLatch reachedCheckpointPublishLatch, - boolean mockPrimaryTerm + boolean mockPrimaryTerm, + boolean testUploadTimeout ) throws IOException { // Create index shard that we will be using to mock different methods in IndexShard for the unit test indexShard = newStartedShard( @@ -564,9 +607,22 @@ private Tuple mockIn // Mock (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) Store remoteStore = mock(Store.class); when(shard.remoteStore()).thenReturn(remoteStore); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory; + RemoteDirectory remoteDirectory = mock(RemoteDirectory.class); + + if (testUploadTimeout) { + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDirectory, + mock(RemoteDirectory.class), + mock(RemoteStoreLockManager.class), + indexShard.getThreadPool(), + indexShard.shardId + ); + } else { + remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore() + .directory()).getDelegate()).getDelegate(); + } + FilterDirectory remoteStoreFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(remoteSegmentStoreDirectory)); when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); @@ -627,7 +683,7 @@ private Tuple mockIn return null; }).when(emptyCheckpointPublisher).publish(any(), any()); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool @@ -638,7 +694,28 @@ private Tuple mockIn RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings); - RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); + if (testUploadTimeout) { + when(remoteStoreSettings.getClusterRemoteSegmentTransferTimeout()).thenReturn(TimeValue.timeValueMillis(10)); + doAnswer(invocation -> { + ActionListener actionListener = invocation.getArgument(5); + indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + logger.warn("copyFrom thread interrupted during sleep"); + } + actionListener.onResponse(null); + }); + return true; + }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(ActionListener.class), any(Boolean.class)); + } + + RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener( + shard, + emptyCheckpointPublisher, + tracker, + remoteStoreSettings + ); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); } diff --git 
a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java index ba10d423779cd..691c1c49f0874 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java @@ -54,6 +54,21 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Base64; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; + public class BlobStoreTransferServiceTests extends OpenSearchTestCase { private ThreadPool threadPool; @@ -227,6 +249,38 @@ private Environment createEnvironment() { ); } + public void testBuildTransferFileMetadata_EmptyInputStream() throws IOException { + InputStream emptyInputStream = new ByteArrayInputStream(new byte[0]); + Map<String, String> metadata = BlobStoreTransferService.buildTransferFileMetadata(emptyInputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + assertEquals("", metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + + public void testBuildTransferFileMetadata_NonEmptyInputStream() throws IOException { + String inputData = "This is a test input stream."; + InputStream inputStream = new ByteArrayInputStream(inputData.getBytes(StandardCharsets.UTF_8)); + Map<String, String> metadata = BlobStoreTransferService.buildTransferFileMetadata(inputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + String expectedBase64String = Base64.getEncoder().encodeToString(inputData.getBytes(StandardCharsets.UTF_8)); + assertEquals(expectedBase64String, metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + + public void testBuildTransferFileMetadata_InputStreamExceedsLimit() { + byte[] largeData = new byte[1025]; // 1025 bytes, exceeding the 1KB limit + InputStream largeInputStream = new ByteArrayInputStream(largeData); + IOException exception = assertThrows(IOException.class, () -> BlobStoreTransferService.buildTransferFileMetadata(largeInputStream)); + assertEquals("Input stream exceeds 1KB limit", exception.getMessage()); + } + + public void testBuildTransferFileMetadata_SmallInputStreamOptimization() throws IOException { + String inputData = "Small input"; + InputStream inputStream = new ByteArrayInputStream(inputData.getBytes(StandardCharsets.UTF_8)); + Map<String, String> metadata = BlobStoreTransferService.buildTransferFileMetadata(inputStream); +
assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + String expectedBase64String = Base64.getEncoder().encodeToString(inputData.getBytes(StandardCharsets.UTF_8)); + assertEquals(expectedBase64String, metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + private static class TestClass implements Serializable { private TestClass(String name, String value) { this.name = name; diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 8b3fc6651a505..c6f9838ad2d52 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -15,6 +15,7 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; @@ -41,9 +42,12 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.UUID; @@ -53,15 +57,17 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -81,6 +87,7 @@ public class TranslogTransferManagerTests extends OpenSearchTestCase { FileTransferTracker tracker; TranslogTransferManager translogTransferManager; long delayForBlobDownload; + boolean isTranslogMetadataEnabled; @Override public void setUp() throws Exception { @@ -97,6 +104,7 @@ public void setUp() throws Exception { tlogBytes = "Hello Translog".getBytes(StandardCharsets.UTF_8); ckpBytes = "Hello Checkpoint".getBytes(StandardCharsets.UTF_8); tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); + isTranslogMetadataEnabled = false; translogTransferManager = new TranslogTransferManager( shardId, transferService, @@ -104,7 +112,8 @@ public void setUp() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); delayForBlobDownload = 1; @@ -170,7 +179,8 @@ public void onFailure(TransferFileSnapshot 
fileSnapshot, Exception e) { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -222,7 +232,8 @@ public void testTransferSnapshotOnUploadTimeout() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); SetOnce exception = new SetOnce<>(); translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -265,7 +276,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); SetOnce exception = new SetOnce<>(); @@ -297,64 +309,66 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { uploadThread.get().interrupt(); } - private TransferSnapshot createTransferSnapshot() { - return new TransferSnapshot() { - @Override - public Set getCheckpointFileSnapshots() { - try { - return Set.of( - new CheckpointFileSnapshot( - primaryTerm, - generation, - minTranslogGeneration, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX), - null - ), - new CheckpointFileSnapshot( - primaryTerm, - generation, - minTranslogGeneration, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX), - null - ) - ); - } catch (IOException e) { - throw new AssertionError("Failed to create temp file", e); + private TransferSnapshot createTransferSnapshot() throws IOException { + try { + CheckpointFileSnapshot checkpointFileSnapshot1 = new CheckpointFileSnapshot( + primaryTerm, + generation, + minTranslogGeneration, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX), + null + ); + CheckpointFileSnapshot checkpointFileSnapshot2 = new CheckpointFileSnapshot( + primaryTerm, + generation, + minTranslogGeneration, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX), + null + ); + TranslogFileSnapshot translogFileSnapshot1 = new TranslogFileSnapshot( + primaryTerm, + generation, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX), + null + ); + TranslogFileSnapshot translogFileSnapshot2 = new TranslogFileSnapshot( + primaryTerm, + generation - 1, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX), + null + ); + + return new TransferSnapshot() { + @Override + public Set getCheckpointFileSnapshots() { + return Set.of(checkpointFileSnapshot1, checkpointFileSnapshot2); } - } - @Override - public Set getTranslogFileSnapshots() { - try { - return Set.of( - new TranslogFileSnapshot( - primaryTerm, - generation, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX), - null - ), - new TranslogFileSnapshot( - primaryTerm, - generation - 1, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX), - null - ) - ); - } catch (IOException e) { - throw new AssertionError("Failed to create temp file", e); + @Override + public Set 
getTranslogFileSnapshots() { + return Set.of(translogFileSnapshot1, translogFileSnapshot2); } - } - @Override - public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); - } + @Override + public TranslogTransferMetadata getTranslogTransferMetadata() { + return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); + } - @Override - public String toString() { - return "test-to-string"; - } - }; + @Override + public Set<TransferFileSnapshot> getTranslogFileSnapshotWithMetadata() throws IOException { + translogFileSnapshot1.setMetadataFileInputStream(checkpointFileSnapshot1.inputStream()); + translogFileSnapshot2.setMetadataFileInputStream(checkpointFileSnapshot2.inputStream()); + return Set.of(translogFileSnapshot1, translogFileSnapshot2); + } + + @Override + public String toString() { + return "test-to-string"; + } + }; + } catch (Exception e) { + throw new IOException("Failed to create transfer snapshot", e); + } } public void testReadMetadataNoFile() throws IOException { @@ -502,7 +516,8 @@ public void testDeleteTranslogSuccess() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -567,7 +582,8 @@ public void testDeleteTranslogFailure() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -624,4 +640,117 @@ public void testMetadataConflict() throws InterruptedException { assertThrows(RuntimeException.class, translogTransferManager::readMetadata); } + + // Tests for cases where the ckp file is stored as translog metadata.
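Before the tests themselves, it helps to see the round trip they verify: on upload the checkpoint (.ckp) bytes ride along as a Base64-encoded entry in the translog blob's metadata map under CHECKPOINT_FILE_DATA_KEY, and on download a single downloadBlobWithMetadata call yields both the .tlog stream and the .ckp bytes. A hedged, self-contained sketch; only the key name comes from TranslogTransferManager, its literal value here is a stand-in:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

// Sketch of the "checkpoint as blob metadata" round trip verified below: on upload
// the .ckp bytes are Base64-encoded into the translog blob's metadata map; on
// download the value is decoded and written back next to the .tlog file.
public class CheckpointMetadataSketch {
    static final String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; // stand-in value

    static Map<String, String> encode(byte[] ckpBytes) {
        Map<String, String> metadata = new HashMap<>();
        metadata.put(CHECKPOINT_FILE_DATA_KEY, Base64.getEncoder().encodeToString(ckpBytes));
        return metadata;
    }

    static byte[] decode(Map<String, String> metadata) {
        return Base64.getDecoder().decode(metadata.get(CHECKPOINT_FILE_DATA_KEY));
    }

    public static void main(String[] args) {
        byte[] ckp = "Hello Checkpoint".getBytes(StandardCharsets.UTF_8);
        Map<String, String> metadata = encode(ckp);
        // One download yields both files: the stream body is the .tlog,
        // the decoded metadata value is the .ckp.
        System.out.println(new String(decode(metadata), StandardCharsets.UTF_8));
    }
}
```

This is why testDownloadTranslogWithMetadata below expects zero calls to downloadBlob and exactly one call to downloadBlobWithMetadata: the checkpoint no longer needs its own download.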
+ public void testTransferSnapshotWithTranslogMetadata() throws Exception { + AtomicInteger fileTransferSucceeded = new AtomicInteger(); + AtomicInteger fileTransferFailed = new AtomicInteger(); + AtomicInteger translogTransferSucceeded = new AtomicInteger(); + AtomicInteger translogTransferFailed = new AtomicInteger(); + + isTranslogMetadataEnabled = true; + + doNothing().when(transferService) + .uploadBlob( + any(TransferFileSnapshot.class), + Mockito.eq(remoteBaseTransferPath.add(String.valueOf(primaryTerm))), + any(WritePriority.class) + ); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + Set transferFileSnapshots = (Set) invocationOnMock.getArguments()[0]; + transferFileSnapshots.forEach(transferFileSnapshot -> { + assertNotNull(transferFileSnapshot.getMetadataFileInputStream()); + listener.onResponse(transferFileSnapshot); + }); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ) { + @Override + public void onSuccess(TransferFileSnapshot fileSnapshot) { + fileTransferSucceeded.incrementAndGet(); + super.onSuccess(fileSnapshot); + } + + @Override + public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { + fileTransferFailed.incrementAndGet(); + super.onFailure(fileSnapshot, e); + } + + }; + + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), + fileTransferTracker, + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled + ); + + assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) { + translogTransferSucceeded.incrementAndGet(); + } + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + translogTransferFailed.incrementAndGet(); + } + })); + assertEquals(2, fileTransferSucceeded.get()); + assertEquals(0, fileTransferFailed.get()); + assertEquals(1, translogTransferSucceeded.get()); + assertEquals(0, translogTransferFailed.get()); + assertEquals(2, fileTransferTracker.allUploaded().size()); + } + + public void testDownloadTranslogWithMetadata() throws IOException { + isTranslogMetadataEnabled = true; + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), + tracker, + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled + ); + Path location = createTempDir(); + assertFalse(Files.exists(location.resolve("translog-23.tlog"))); + assertFalse(Files.exists(location.resolve("translog-23.ckp"))); + mockDownloadBlobWithMetadataResponse(); + translogTransferManager.downloadTranslog("12", "23", location); + verify(transferService, times(0)).downloadBlob(any(BlobPath.class), eq("translog-23.tlog")); + verify(transferService, times(0)).downloadBlob(any(BlobPath.class), eq("translog-23.ckp")); + verify(transferService, times(1)).downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog")); + assertTrue(Files.exists(location.resolve("translog-23.tlog"))); + 
assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStatsWithMetadata(); + } + + private void mockDownloadBlobWithMetadataResponse() throws IOException { + Map metadata = new HashMap<>(); + String ckpDataString = Base64.getEncoder().encodeToString(ckpBytes); + metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpDataString); + when(transferService.downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new InputStreamWithMetadata(new ByteArrayInputStream(tlogBytes), metadata); + }); + } + + private void assertTlogCkpDownloadStatsWithMetadata() { + assertEquals(tlogBytes.length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + // Expect delay for both tlog and ckp file + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= delayForBlobDownload); + } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 17bd821ed0c8c..8f1d58cf201e9 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -314,7 +314,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m systemIndices, true, awarenessReplicaBalance, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index d91dc696eb30b..f44cc352cd330 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -34,17 +34,23 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.bootstrap.BootstrapCheck; import org.opensearch.bootstrap.BootstrapContext; +import org.opensearch.client.Client; import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; @@ -56,22 +62,35 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.TelemetryAwarePlugin; +import org.opensearch.plugins.TelemetryPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import 
org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -404,6 +423,81 @@ public void testCreateWithFileCache() throws Exception { } } + public void testTelemetryAwarePlugins() throws IOException { + Settings.Builder settings = baseSettings(); + List> plugins = basePlugins(); + plugins.add(MockTelemetryAwarePlugin.class); + try (Node node = new MockNode(settings.build(), plugins)) { + MockTelemetryAwareComponent mockTelemetryAwareComponent = node.injector().getInstance(MockTelemetryAwareComponent.class); + assertNotNull(mockTelemetryAwareComponent.getTracer()); + assertNotNull(mockTelemetryAwareComponent.getMetricsRegistry()); + TelemetryAwarePlugin telemetryAwarePlugin = node.getPluginsService().filterPlugins(TelemetryAwarePlugin.class).get(0); + assertTrue(telemetryAwarePlugin instanceof MockTelemetryAwarePlugin); + } + } + + public void testTelemetryPluginShouldNOTImplementTelemetryAwarePlugin() throws IOException { + Settings.Builder settings = baseSettings(); + List> plugins = basePlugins(); + plugins.add(MockTelemetryPlugin.class); + FeatureFlagSetter.set(FeatureFlags.TELEMETRY); + settings.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); + assertThrows(IllegalStateException.class, () -> new MockNode(settings.build(), plugins)); + } + + private static class MockTelemetryAwareComponent { + private final Tracer tracer; + private final MetricsRegistry metricsRegistry; + + public MockTelemetryAwareComponent(Tracer tracer, MetricsRegistry metricsRegistry) { + this.tracer = tracer; + this.metricsRegistry = metricsRegistry; + } + + public Tracer getTracer() { + return tracer; + } + + public MetricsRegistry getMetricsRegistry() { + return metricsRegistry; + } + } + + public static class MockTelemetryAwarePlugin extends Plugin implements TelemetryAwarePlugin { + @Override + public Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier, + Tracer tracer, + MetricsRegistry metricsRegistry + ) { + return List.of(new MockTelemetryAwareComponent(tracer, metricsRegistry)); + } + + } + + public static class MockTelemetryPlugin extends Plugin implements TelemetryPlugin, TelemetryAwarePlugin { + + @Override + public Optional 
+ return Optional.empty(); + } + + @Override + public String getName() { + return null; + } + } + public static class MockCircuitBreakerPlugin extends Plugin implements CircuitBreakerPlugin { private SetOnce<CircuitBreaker> myCircuitBreaker = new SetOnce<>();
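TelemetryAwarePlugin, exercised by the NodeTests changes above, lets a plugin receive the node-level Tracer and MetricsRegistry as extra createComponents arguments instead of depending on telemetry internals. A minimal sketch of a plugin that uses the registry; the class and counter names are illustrative, the parameter list mirrors MockTelemetryAwarePlugin above, and the createCounter(name, description, unit) call is assumed from the telemetry metrics API:

    public static class RequestCountingPlugin extends Plugin implements TelemetryAwarePlugin {
        @Override
        public Collection<Object> createComponents(
            Client client,
            ClusterService clusterService,
            ThreadPool threadPool,
            ResourceWatcherService resourceWatcherService,
            ScriptService scriptService,
            NamedXContentRegistry xContentRegistry,
            Environment environment,
            NodeEnvironment nodeEnvironment,
            NamedWriteableRegistry namedWriteableRegistry,
            IndexNameExpressionResolver indexNameExpressionResolver,
            Supplier<RepositoriesService> repositoriesServiceSupplier,
            Tracer tracer,
            MetricsRegistry metricsRegistry
        ) {
            // Register a counter up front and expose it as a node component.
            return List.of(metricsRegistry.createCounter("plugin.requests.count", "Requests seen by the plugin", "1"));
        }
    }

Note that a TelemetryPlugin itself must not also implement TelemetryAwarePlugin; as the second test above shows, node construction fails with an IllegalStateException in that case.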
diff --git a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java index c4ba271d27ae9..de7f8977686a7 100644 --- a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java +++ b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java @@ -19,6 +19,7 @@ import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.List; import java.util.Locale; import java.util.Map; @@ -28,6 +29,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -148,4 +150,77 @@ public void testNoCryptoMetadata() throws UnknownHostException { RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); assertNull(repositoryMetadata.cryptoMetadata()); } + + public void testEqualsWithRepoSkip() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz" + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + + String routingTableRepoName = "remote-store-B"; + String routingTableRepoTypeSettingKey = String.format( + Locale.ROOT, + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + routingTableRepoName + ); + String routingTableRepoSettingsKey = String.format( + Locale.ROOT, + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + routingTableRepoName + ); + + Map<String, String> attr2 = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, + routingTableRepoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + routingTableRepoTypeSettingKey, + "s3", + routingTableRepoSettingsKey, + "xyz" + ); + DiscoveryNode node2 = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr2, + emptySet(), + Version.CURRENT + ); + RemoteStoreNodeAttribute remoteStoreNodeAttribute2 = new RemoteStoreNodeAttribute(node2); + + assertFalse(remoteStoreNodeAttribute.equalsWithRepoSkip(remoteStoreNodeAttribute2, List.of())); + assertTrue(remoteStoreNodeAttribute.equalsWithRepoSkip(remoteStoreNodeAttribute2, List.of(routingTableRepoName))); + } } diff --git a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java index 7ca1f1e864b99..7567224f16ac3 100644 --- a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -63,7 +64,7 @@ public class ResponseCollectorServiceTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadpool = new TestThreadPool("response_collector_tests"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadpool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java index 4f615290f1805..fbb083a3ae419 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -17,6 +17,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -33,7 +34,7 @@ public class AdmissionControlServiceTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java index c11ee1cc608f6..fbadcad804b31 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java +++ 
b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -28,7 +29,7 @@ public class AdmissionControlSettingsTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java index e72c0cd58ed64..f2cb45a033460 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java @@ -15,6 +15,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -31,7 +32,7 @@ public class CpuBasedAdmissionControllerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java index c5a2208f49ce6..54cb438e14ce6 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java @@ -15,6 +15,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -31,7 +32,7 @@ public class IoBasedAdmissionControllerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new 
TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java index 6836ecb3d615f..f5686f33e7f50 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -28,7 +29,7 @@ public class CPUBasedAdmissionControllerSettingsTests extends OpenSearchTestCase public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java index c462f9700264d..3f157531f6c9a 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java @@ -21,6 +21,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -36,7 +37,7 @@ public class IoBasedAdmissionControllerSettingsTests extends OpenSearchTestCase public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("io_based_admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java index 7b4db5f787d6e..da57ef9f06a1a 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java @@ -20,6 +20,7 @@ import 
org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -46,7 +47,7 @@ public void setUp() throws Exception { ) .build(); threadPool = new TestThreadPool("admission_controller_settings_test"); - ClusterService clusterService = new ClusterService( + ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java index fe0399e79a5f4..0ef9aa61bb827 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java @@ -20,6 +20,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -43,7 +44,7 @@ public void setUp() throws Exception { ) .build(); threadPool = new TestThreadPool("admission_controller_settings_test"); - ClusterService clusterService = new ClusterService( + ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool
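The recurring edit in the admission-control tests above replaces direct ClusterService construction with the test-framework factory; as the ClusterServiceUtils hunk later in this patch shows, the factory forwards to the new constructor with a ClusterManagerMetrics built on the no-op metrics registry, so these tests keep their behavior without knowing about metrics. The resulting setUp pattern, shown here as a sketch using the names from the tests above:

    // Typical test setUp after this change; the factory supplies
    // new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) internally.
    threadPool = new TestThreadPool("admission_controller_settings_test");
    clusterService = ClusterServiceUtils.createClusterService(
        Settings.EMPTY,
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
        threadPool
    );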
diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java index 6883eccbf7427..818a2fa18d751 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java @@ -130,10 +130,12 @@ public void testUnrecognizedIndexMetricDidYouMean() { ); } - public void testIndexMetricsRequestWithoutIndicesMetric() throws IOException { + public void testIndexMetricsRequestWithoutIndicesAndCachesMetrics() throws IOException { final HashMap<String, String> params = new HashMap<>(); final Set<String> metrics = new HashSet<>(RestNodesStatsAction.METRICS.keySet()); metrics.remove("indices"); + // caches stats are handled separately + metrics.remove("caches"); params.put("metric", randomSubsetOf(1, metrics).get(0)); final String indexMetric = randomSubsetOf(1, RestNodesStatsAction.FLAGS.keySet()).get(0); params.put("index_metric", indexMetric); @@ -150,6 +152,19 @@ public void testIndexMetricsRequestWithoutIndicesMetric() throws IOException { ); } + public void testCacheStatsRequestWithInvalidCacheType() throws IOException { + final HashMap<String, String> params = new HashMap<>(); + params.put("metric", "caches"); + final String cacheType = randomAlphaOfLength(64); + params.put("index_metric", cacheType); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_nodes/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request [/_nodes/stats] contains unrecognized cache type: [" + cacheType + "]"))); + } + public void testIndexMetricsRequestOnAllRequest() throws IOException { final HashMap<String, String> params = new HashMap<>(); params.put("metric", "_all"); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 6d105c27a692f..d97cfdf003600 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -353,33 +353,40 @@ private void testSimple( newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) ) ) { + List<Document> documents = new ArrayList<>(); Document document = new Document(); addFieldConsumer.apply(document, "string", "a"); addFieldConsumer.apply(document, "string", "b"); - indexWriter.addDocument(document); + documents.add(document); + document = new Document(); addFieldConsumer.apply(document, "string", ""); addFieldConsumer.apply(document, "string", "c"); addFieldConsumer.apply(document, "string", "a"); - indexWriter.addDocument(document); + documents.add(document); + document = new Document(); addFieldConsumer.apply(document, "string", "b"); addFieldConsumer.apply(document, "string", "d"); - indexWriter.addDocument(document); + documents.add(document); + document = new Document(); addFieldConsumer.apply(document, "string", ""); if (includeDocCountField) { // Adding _doc_count to one document document.add(new NumericDocValuesField("_doc_count", 10)); } - indexWriter.addDocument(document); + documents.add(document); if (includeDeletedDocumentsInSegment) { document = new Document(); ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e"); - indexWriter.addDocument(document); + documents.add(document); + indexWriter.addDocuments(documents); indexWriter.deleteDocuments(new Term("string", "e")); assertEquals(5, indexWriter.getDocStats().maxDoc); // deleted document still in segment + } else { + indexWriter.addDocuments(documents); } try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index 5b1035e24185d..fd3dd8c12e84e 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -357,6 +357,70 @@ public void testDerivedFieldsParsingAndSerialization() throws IOException { } + public void testDerivedFieldsParsingAndSerializationObjectType() throws IOException { + { + String restContent = "{\n" + + " \"derived\": {\n" + + " \"duration\": {\n" + + " \"type\": \"long\",\n" + + " \"script\": \"emit(doc['test'])\"\n" + + " },\n" + + " \"ip_from_message\": {\n" + + " \"type\": \"keyword\",\n" + + " \"script\": \"emit(doc['message'])\"\n" + + " },\n" + + " \"object\": {\n" + + " \"type\": 
\"object\",\n" + + " \"script\": \"emit(doc['test'])\",\n" + + " \"format\": \"dd-MM-yyyy\",\n" + + " \"source_indexed_field\": \"test\",\n" + + " \"ignore_malformed\": true,\n" + + " \"properties\": {\n" + + " \"sub_field\": \"text\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"query\" : {\n" + + " \"match\": { \"content\": { \"query\": \"foo bar\" }}\n" + + " }\n" + + "}"; + + String expectedContent = + "{\"query\":{\"match\":{\"content\":{\"query\":\"foo bar\",\"operator\":\"OR\",\"prefix_length\":0,\"max_expansions\":50,\"fuzzy_transpositions\":true,\"lenient\":false,\"zero_terms_query\":\"NONE\",\"auto_generate_synonyms_phrase_query\":true,\"boost\":1.0}}},\"derived\":{\"duration\":{\"type\":\"long\",\"script\":\"emit(doc['test'])\"},\"ip_from_message\":{\"type\":\"keyword\",\"script\":\"emit(doc['message'])\"},\"object\":{\"format\":\"dd-MM-yyyy\",\"source_indexed_field\":\"test\",\"ignore_malformed\":true,\"type\":\"object\",\"script\":\"emit(doc['test'])\",\"properties\":{\"sub_field\":\"text\"}},\"derived_field\":{\"type\":\"object\",\"script\":{\"source\":\"emit(doc['message']\",\"lang\":\"painless\"},\"properties\":{\"sub_field_2\":\"keyword\"},\"source_indexed_field\":\"message\",\"format\":\"dd-MM-yyyy\",\"ignore_malformed\":true}}}"; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + searchSourceBuilder.derivedField( + "derived_field", + "object", + new Script("emit(doc['message']"), + Map.of("sub_field_2", "keyword"), + "message", + "dd-MM-yyyy", + true + ); + searchSourceBuilder = rewrite(searchSourceBuilder); + assertEquals(3, searchSourceBuilder.getDerivedFieldsObject().size()); + assertEquals(1, searchSourceBuilder.getDerivedFields().size()); + assertEquals(1, searchSourceBuilder.getDerivedFields().get(0).getProperties().size()); + assertEquals("message", searchSourceBuilder.getDerivedFields().get(0).getSourceIndexedField()); + assertEquals("dd-MM-yyyy", searchSourceBuilder.getDerivedFields().get(0).getFormat()); + assertTrue(searchSourceBuilder.getDerivedFields().get(0).getIgnoreMalformed()); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + searchSourceBuilder.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + SearchSourceBuilder deserializedBuilder = new SearchSourceBuilder(in); + String actualContent = deserializedBuilder.toString(); + assertEquals(expectedContent, actualContent); + assertEquals(searchSourceBuilder.hashCode(), deserializedBuilder.hashCode()); + assertNotSame(searchSourceBuilder, deserializedBuilder); + } + } + } + } + } + public void testAggsParsing() throws IOException { { String restContent = "{\n" diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4b1edd6efc1b9..c199d3f25edb3 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -112,6 +112,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -190,7 +191,6 @@ 
import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; -import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; @@ -1680,7 +1680,6 @@ private Environment createEnvironment(String nodeName) { ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(Settings.EMPTY) ) - .put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .put(MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.getKey(), 1000) // o.w. some tests might block .build() ); @@ -1922,7 +1921,13 @@ private final class TestClusterNode { settings, clusterSettings, clusterManagerService, - new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { + new ClusterApplierService( + node.getName(), + settings, + clusterSettings, + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue, threadPool); @@ -2164,7 +2169,8 @@ public void onFailure(final Exception e) { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); actions.put( CreateIndexAction.INSTANCE, @@ -2244,7 +2250,8 @@ public void onFailure(final Exception e) { ), shardLimitValidator, indicesService, - clusterInfoService::getClusterInfo + clusterInfoService::getClusterInfo, + () -> 5.0 ); actions.put( PutMappingAction.INSTANCE, diff --git a/settings.gradle b/settings.gradle index 065a8dc3d0a8a..ca8538a967ef7 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.17.3" + id "com.gradle.develocity" version "3.17.4" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 33635f2053a21..ddb876b46fd1c 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -58,7 +58,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.10.1' + api 'com.google.code.gson:gson:2.11.0' api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" @@ -71,7 +71,7 @@ dependencies { api 'org.apache.zookeeper:zookeeper:3.9.2' api "org.apache.commons:commons-text:1.12.0" api "commons-net:commons-net:3.10.0" - api "ch.qos.logback:logback-core:1.5.3" + api "ch.qos.logback:logback-core:1.5.6" api "ch.qos.logback:logback-classic:1.2.13" api 'org.apache.kerby:kerb-admin:2.0.3' runtimeOnly "com.google.guava:guava:${versions.guava}" diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java 
b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 0cf9858be7632..65058b41d4bb3 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -39,6 +39,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskListener; @@ -89,6 +90,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; @@ -1144,7 +1146,8 @@ protected Optional<DisruptableMockTransport> getDisruptableMockTransport(Transpo settings, clusterSettings, deterministicTaskQueue, - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); clusterService.setNodeConnectionsService( @@ -1595,9 +1598,10 @@ static class DisruptableClusterApplierService extends ClusterApplierService { Settings settings, ClusterSettings clusterSettings, DeterministicTaskQueue deterministicTaskQueue, - ThreadPool threadPool + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics ) { - super(nodeName, settings, clusterSettings, threadPool); + super(nodeName, settings, clusterSettings, threadPool, clusterManagerMetrics); this.nodeName = nodeName; this.deterministicTaskQueue = deterministicTaskQueue; addStateApplier(event -> { diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 3ca938c99b5fd..53ef595c7931e 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.ClusterStatePublisher.AckListener; import org.opensearch.common.UUIDs; @@ -45,6 +46,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -74,7 +76,8 @@ public FakeThreadPoolClusterManagerService( super( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); this.name = serviceName; this.onTaskAvailableToRun = 
onTaskAvailableToRun; diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index 8f4f510da5ec3..f0c0e9bc2d589 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -52,6 +53,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.threadpool.ThreadPool; import java.util.Collections; @@ -66,7 +69,8 @@ public static ClusterManagerService createClusterManagerService(ThreadPool threa ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState); clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { @@ -169,8 +173,22 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove } public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode, ClusterSettings clusterSettings) { + return createClusterService(threadPool, localNode, clusterSettings, NoopMetricsRegistry.INSTANCE); + } + + public static ClusterService createClusterService( + ThreadPool threadPool, + DiscoveryNode localNode, + ClusterSettings clusterSettings, + MetricsRegistry metricsRegistry + ) { Settings settings = Settings.builder().put("node.name", "test").put("cluster.name", "ClusterServiceTests").build(); - ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); + ClusterService clusterService = new ClusterService( + settings, + clusterSettings, + threadPool, + new ClusterManagerMetrics(metricsRegistry) + ); clusterService.setNodeConnectionsService(createNoOpNodeConnectionsService()); ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId())) @@ -184,6 +202,10 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove return clusterService; } + public static ClusterService createClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + return new ClusterService(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + } + public static NodeConnectionsService createNoOpNodeConnectionsService() { return new NodeConnectionsService(Settings.EMPTY, null, null) { @Override
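With the overloads added above, a test that wants to observe cluster-manager metrics can supply its own registry instead of the no-op default. A sketch under that assumption; the variable names are illustrative and any MetricsRegistry implementation would do:

    // Default: metrics are dropped via NoopMetricsRegistry.INSTANCE.
    ClusterService quiet = ClusterServiceUtils.createClusterService(threadPool, localNode, clusterSettings);
    // Opt in to real measurements by passing a registry through the new overload.
    MetricsRegistry recording = myTestMetricsRegistry; // hypothetical test registry
    ClusterService measured = ClusterServiceUtils.createClusterService(threadPool, localNode, clusterSettings, recording);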
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index a9f6fdc86155d..0eca08a7678ae 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2640,6 +2640,7 @@ private static Settings buildRemoteStoreNodeAttributes( .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()); return settings.build(); } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 5a3f3b5a07a8d..5ee65e7ea1a1c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -125,7 +125,6 @@ import org.opensearch.index.analysis.TokenizerFactory; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.plugins.AnalysisPlugin; @@ -188,6 +187,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.core.common.util.CollectionUtils.arrayAsArrayList; +import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -1278,7 +1278,7 @@ public static Settings.Builder settings(Version version) { public static Settings.Builder remoteIndexSettings(Version version) { Settings.Builder builder = Settings.builder() - .put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) + .put(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .put(IndexMetadata.SETTING_VERSION_CREATED, version) .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()); return builder; diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java index 53a4e90adb976..fbb39c284f0ff 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java @@ -31,6 +31,14 @@ public class TestShardBatchGatewayAllocator extends ShardsBatchGatewayAllocator { + public TestShardBatchGatewayAllocator() { + + } + + public TestShardBatchGatewayAllocator(long maxBatchSize) { + super(maxBatchSize); + } + Map<String, Map<ShardId, ShardRouting>> knownAllocations = new HashMap<>(); DiscoveryNodes currentNodes = DiscoveryNodes.EMPTY_NODES; Map<String, ReplicationCheckpoint> shardIdNodeToReplicationCheckPointMap = new HashMap<>(); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java index b9cbaacdf8873..732d4291ae670 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java +++ 
b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java @@ -37,6 +37,8 @@ import java.io.IOException; import java.util.Map; +import static org.junit.Assert.fail; + /** * Base class for executable sections that hold assertions */ @@ -79,6 +81,41 @@ protected final Object getActualValue(ClientYamlTestExecutionContext executionCo return executionContext.response(field); } + static Object convertActualValue(Object actualValue, Object expectedValue) { + if (actualValue == null || expectedValue.getClass().isAssignableFrom(actualValue.getClass())) { + return actualValue; + } + if (actualValue instanceof Number && expectedValue instanceof Number) { + if (expectedValue instanceof Float) { + return Float.parseFloat(actualValue.toString()); + } else if (expectedValue instanceof Double) { + return Double.parseDouble(actualValue.toString()); + } else if (expectedValue instanceof Integer) { + return Integer.parseInt(actualValue.toString()); + } else if (expectedValue instanceof Long) { + return Long.parseLong(actualValue.toString()); + } + } + // Force a class cast exception here, so developers can flesh out the above logic as needed. + try { + expectedValue.getClass().cast(actualValue); + } catch (ClassCastException e) { + fail( + "Type mismatch: Expected value (" + + expectedValue + + ") has type " + + expectedValue.getClass() + + ". " + + "Actual value (" + + actualValue + + ") has type " + + actualValue.getClass() + + "." + ); + } + return actualValue; + } + @Override public XContentLocation getLocation() { return location; }
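convertActualValue exists because the YAML REST test client materializes JSON numbers at whatever width the parser picks: a response value that fits in 32 bits arrives as an Integer even when the test expectation was authored as a Long or Double, and the raw Comparable comparison in doAssert would then throw a ClassCastException. A worked sketch of the coercion under that assumption:

    Object actual = 42;      // parser produced an Integer
    Object expected = 100L;  // the YAML test specified a long
    // convertActualValue widens the actual value toward the expected type first:
    Object converted = Assertion.convertActualValue(actual, expected); // -> 42L (a Long)
    // so the subsequent lessThan/greaterThan matcher compares Long to Long instead of
    // failing on Integer.compareTo(Long).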
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java index 4c2e70f37a33c..0d20dc7c326b0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java @@ -71,6 +71,7 @@ public GreaterThanAssertion(XContentLocation location, String field, Object expe @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is greater than [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 8e929eff44348..a6435c1303489 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -72,6 +72,7 @@ public GreaterThanEqualToAssertion(XContentLocation location, String field, Obje @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is greater than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java index d6e2ae1e23996..acffe03d34439 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java @@ -72,6 +72,7 @@ public LessThanAssertion(XContentLocation location, String field, Object expecte @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is less than [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index ee46c04496f32..d685d3e46a543 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -72,6 +72,7 @@ public LessThanOrEqualToAssertion(XContentLocation location, String field, Objec @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is less than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue,