diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 1e3b913c5cb5a..78d8796c624d7 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -30,3 +30,4 @@ BWC_VERSION: - "2.12.0" - "2.12.1" - "2.13.0" + - "2.14.0" diff --git a/.github/workflows/detect-breaking-change.yml b/.github/workflows/detect-breaking-change.yml new file mode 100644 index 0000000000000..1913d070e8c24 --- /dev/null +++ b/.github/workflows/detect-breaking-change.yml @@ -0,0 +1,27 @@ +name: "Detect Breaking Changes" +on: + pull_request + +jobs: + detect-breaking-change: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin # Temurin is a distribution of adoptium + java-version: 21 + - uses: gradle/gradle-build-action@v3 + with: + cache-disabled: true + arguments: japicmp + gradle-version: 8.7 + build-root-directory: server + - if: failure() + run: cat server/build/reports/java-compatibility/report.txt + - if: failure() + uses: actions/upload-artifact@v4 + with: + name: java-compatibility-report.html + path: ${{ github.workspace }}/server/build/reports/java-compatibility/report.html + \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 755cfd34e657e..e3102230bf4b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) - Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) +- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -101,49 +103,21 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) -- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) -- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) -- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268) -- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) -- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) -- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) -- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) -- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] 
([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533))
-- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835))
-- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586))
-- [Metrics Framework] Adds support for asynchronous gauge metric type. ([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642))
-- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601))
-- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542))
-- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625))
-- [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709))
-- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583))
-- Introduce a new setting `index.check_pending_flush.enabled` to expose the ability to disable the check for pending flushes by write threads ([#12710](https://github.com/opensearch-project/OpenSearch/pull/12710))
-- Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435))
+- Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285))
+- Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818))
+- Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768))
+- Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865))
+- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
+- [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954))
+- Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044))
+- Add cluster primary balance constraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656))

### Dependencies
-- Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288))
-- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.33.0 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289))
-- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290))
-- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367))
-- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372))
-- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388),
[#12618](https://github.com/opensearch-project/OpenSearch/pull/12618)) -- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587)) -- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445)) -- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461)) -- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462)) -- Bump `lycheeverse/lychee-action` from 1.9.1 to 1.9.3 ([#12521](https://github.com/opensearch-project/OpenSearch/pull/12521)) -- Bump `com.azure:azure-core` from 1.39.0 to 1.47.0 ([#12520](https://github.com/opensearch-project/OpenSearch/pull/12520)) -- Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519)) -- Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) -- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) -- Bump Jackson version from 2.16.1 to 2.17.0 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611), [#12662](https://github.com/opensearch-project/OpenSearch/pull/12662)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) -- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) -- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) -- Bump `peter-evans/create-pull-request` from 5 to 6 ([#12724](https://github.com/opensearch-project/OpenSearch/pull/12724)) +- Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) +- Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) +- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) +- Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) +- Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996)) ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) @@ -151,25 +125,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) - Mark fuzzy filter GA and remove experimental setting ([12631](https://github.com/opensearch-project/OpenSearch/pull/12631)) - Keep the election scheduler open until cluster state has been applied ([#11699](https://github.com/opensearch-project/OpenSearch/pull/11699)) +- [BWC and API 
enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) +- Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) ### Deprecated ### Removed ### Fixed -- Fix for deserilization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679)) -- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) -- Add support of special WrappingSearchAsyncActionPhase so the onPhaseStart() will always be followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293)) -- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298)) -- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481)) -- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919)) -- Prevent unnecessary fetch sub phase processor initialization during fetch phase execution ([#12503](https://github.com/opensearch-project/OpenSearch/pull/12503)) -- Warn about deprecated and ignored index.mapper.dynamic index setting ([#11193](https://github.com/opensearch-project/OpenSearch/pull/11193)) -- Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499)) -- Fix get task API does not refresh resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531)) -- onShardResult and onShardFailure are executed on one shard causes opensearch jvm crashed ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158)) +- Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) +- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x diff --git a/README.md b/README.md index b5fc45509b002..748f8a366ecc8 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.10 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.10.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.10.0") -[![3.0 Open 
Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") +[![2.14.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.14.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.14.0") +[![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) [![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 5d7e78589306f..599beb8649fcd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -65,9 +65,6 @@ public void apply(Project project) { addLocalMavenRepo(project); addZipArtifact(project); Task validatePluginZipPom = project.getTasks().findByName("validatePluginZipPom"); - if (validatePluginZipPom != null) { - validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); - } // There are number of tasks prefixed by 'publishPluginZipPublication', f.e.: // publishPluginZipPublicationToZipStagingRepository, publishPluginZipPublicationToMavenLocal @@ -76,7 +73,11 @@ public void apply(Project project) { .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo")) .collect(Collectors.toSet()); if (!publishPluginZipPublicationToTasks.isEmpty()) { - publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication")); + if (validatePluginZipPom != null) { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn(validatePluginZipPom)); + } else { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication")); + } } } else { project.getLogger() diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java index 0e7a357dd5d18..d3f173c9c02ea 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java @@ -53,12 +53,19 @@ public TaskProvider createTask(Project project) { TaskProvider validateTask = project.getTasks() .register("validate" + publicationName + "Pom", PomValidationTask.class); validatePom.configure(t -> t.dependsOn(validateTask)); + TaskProvider generateMavenPom = project.getTasks() + .withType(GenerateMavenPom.class) + .named("generatePomFileFor" + publicationName + "Publication"); validateTask.configure(task -> { - GenerateMavenPom generateMavenPom = project.getTasks() - .withType(GenerateMavenPom.class) - 
.getByName("generatePomFileFor" + publicationName + "Publication"); task.dependsOn(generateMavenPom); - task.getPomFile().fileValue(generateMavenPom.getDestination()); + task.getPomFile().fileProvider(generateMavenPom.map(GenerateMavenPom::getDestination)); + publishing.getPublications().all(publicationForPomGen -> { + task.mustRunAfter( + project.getTasks() + .withType(GenerateMavenPom.class) + .getByName("generatePomFileFor" + Util.capitalize(publicationForPomGen.getName()) + "Publication") + ); + }); }); }); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java index aca882fbb6477..b76e0d6dd93cf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java @@ -106,6 +106,7 @@ private void validateNonNull(String element, T value, Runnable validator) { private void validateString(String element, String value) { validateNonNull(element, value, () -> validateNonEmpty(element, value, s -> s.trim().isEmpty())); + getLogger().info(element + " with value " + value + " is validated."); } private void validateCollection(String element, Collection value, Consumer validator) { diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java index def5248c1f255..0ede465439400 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java @@ -45,6 +45,8 @@ public class GradleThreadsFilter implements ThreadFilter { public boolean reject(Thread t) { return t.getName().startsWith("Exec process") || t.getName().startsWith("Memory manager") - || t.getName().startsWith("File watcher consumer"); + || t.getName().startsWith("File watcher consumer") + || t.getName().startsWith("sshd-SshClient") /* Started by SshClient (sshd-core), part of SftpFileSystemProvider */ + || t.getName().startsWith("Thread-"); /* Started by AbstractFactoryManager (sshd-core), part of SftpFileSystemProvider */ } } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8705588babe97..5c9cd25bb79ad 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -14,7 +14,7 @@ icu4j = 70.1 supercsv = 2.4.0 log4j = 2.21.0 slf4j = 1.7.36 -asm = 9.6 +asm = 9.7 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 @@ -26,7 +26,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.107.Final +netty = 4.1.108.Final joda = 2.12.2 # project reactor diff --git a/distribution/src/bin/opensearch b/distribution/src/bin/opensearch index 947d1167f79f2..8a3b0a009437f 100755 --- a/distribution/src/bin/opensearch +++ b/distribution/src/bin/opensearch @@ -36,14 +36,16 @@ fi # get keystore password before setting java options to avoid # conflicting GC configurations for the keystore tools -unset KEYSTORE_PASSWORD -KEYSTORE_PASSWORD= if [[ $CHECK_KEYSTORE = true ]] \ && bin/opensearch-keystore has-passwd --silent then - if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then - echo "Failed to read keystore password on console" 1>&2 - exit 1 + if [[ ! -z "${KEYSTORE_PASSWORD}" ]]; then + echo "Using value of KEYSTORE_PASSWORD from the environment" + else + if ! 
read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then + echo "Failed to read keystore password on console" 1>&2 + exit 1 + fi fi fi diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index cce21504c55b7..b7ecab24165fa 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -62,14 +62,17 @@ if not exist "%SERVICE_LOG_DIR%" ( mkdir "%SERVICE_LOG_DIR%" ) -SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent IF !ERRORLEVEL! EQU 0 ( - SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: - IF !ERRORLEVEL! NEQ 0 ( - ECHO Failed to read keystore password on standard input - EXIT /B !ERRORLEVEL! + if defined KEYSTORE_PASSWORD ( + ECHO Using value of KEYSTORE_PASSWORD from the environment + ) else ( + SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: + IF !ERRORLEVEL! NEQ 0 ( + ECHO Failed to read keystore password on standard input + EXIT /B !ERRORLEVEL! + ) ) ) ) diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 82a4add334a7d..9b0d73222260e 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d +distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0 diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index cce86b452f698..dda3983fbb4d1 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -33,6 +33,7 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; @@ -69,8 +70,9 @@ /** * A core library base class for all opensearch exceptions. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchException extends RuntimeException implements Writeable, ToXContentFragment { protected static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0); diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 66ba446d4fc54..56df46ae94d44 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -101,6 +101,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_2_14_0 = new Version(2140099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java index 846950ff17c63..9a09b3b38a5f2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java @@ -32,14 +32,17 @@ package org.opensearch.core.common.breaker; +import org.opensearch.common.annotation.PublicApi; + import java.util.Locale; /** * Interface for an object that can be incremented, breaking after some * configured limit has been reached. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface CircuitBreaker { /** @@ -72,8 +75,10 @@ public interface CircuitBreaker { /** * The type of breaker * can be {@link #MEMORY}, {@link #PARENT}, or {@link #NOOP} - * @opensearch.internal + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { /** A regular or ChildMemoryCircuitBreaker */ MEMORY, diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java index 8908a172395f2..e2266339c058f 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.InetAddresses; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ * the addresses the transport is bound to, and the other is the published one that represents the address clients * should communicate on. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BoundTransportAddress implements Writeable { private TransportAddress[] boundAddresses; diff --git a/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java index 038069e93a51b..4ae01e140a89c 100644 --- a/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java @@ -32,6 +32,7 @@ package org.opensearch.core.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -40,8 +41,9 @@ /** * Response over the transport interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TransportResponse extends TransportMessage { /** diff --git a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java index da87acc7124aa..da8c06c07d8e5 100644 --- a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java +++ b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java @@ -10,6 +10,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.semver.expr.Caret; @@ -31,7 +32,10 @@ *
 * <li>'~' Allows for patch version variability starting from the range version. For example, ~1.2.3 range would match versions greater than or equal to 1.2.3 but less than 1.3.0</li>
 * <li>'^' Allows for patch and minor version variability starting from the range version. For example, ^1.2.3 range would match versions greater than or equal to 1.2.3 but less than 2.0.0</li>
  • * + * + * @opensearch.api */ +@PublicApi(since = "2.13.0") public class SemverRange implements ToXContentFragment { private final Version rangeVersion; diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java index 2a81fa5f4986e..c7b5a8978188f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.network.InetAddresses; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -118,6 +119,19 @@ public Object convert(Object value) { return value.toString(); } }, + IP { + @Override + public Object convert(Object value) { + // If the value is a valid ipv4/ipv6 address, we return the original value directly because IpFieldType + // can accept string value, this is simpler than we return an InetAddress object which needs to do more + // work such as serialization + if (value instanceof String && InetAddresses.isInetAddress(value.toString())) { + return value; + } else { + throw new IllegalArgumentException("[" + value + "] is not a valid ipv4/ipv6 address"); + } + } + }, AUTO { @Override public Object convert(Object value) { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java index 0ba0a39261d00..50ece9282888f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java @@ -550,4 +550,29 @@ public void testTargetField() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt))); assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt)); } + + public void testConvertIP() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String validIPString; + if (randomBoolean()) { + validIPString = "1.2.3.4"; + } else { + validIPString = "::1"; + } + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, validIPString); + + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(validIPString)); + + String invalidIPString = randomAlphaOfLength(10); + fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, invalidIPString); + Processor processorWithInvalidIP = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + try { + processorWithInvalidIP.execute(ingestDocument); + fail("processor execute should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[" + invalidIPString + "] is not a valid ipv4/ipv6 address")); + } + } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java index 3259ba85ef340..b53ce2db994a8 100644 
--- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -24,7 +24,7 @@ public class CopyProcessorTests extends OpenSearchTestCase { public void testCopyExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); @@ -71,7 +71,8 @@ public void testCopyWithIgnoreMissing() throws Exception { public void testCopyWithRemoveSource() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml new file mode 100644 index 0000000000000..994ed225dd624 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml @@ -0,0 +1,83 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test convert processor with ip type": + - skip: + version: " - 2.13.99" + reason: "introduced in 2.14.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "type": "ip" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /\[1.1.1.\] is not a valid ipv4\/ipv6 address/ + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "target_field" : "ip_field", + "type" : "ip", + "ignore_failure" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." 
+ } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1."} } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1.1" + } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1.1", ip_field: "1.1.1.1"} } diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 deleted file mode 100644 index 2d9e6a9d3cfd6..0000000000000 --- a/modules/lang-expression/licenses/asm-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-9.7.jar.sha1 new file mode 100644 index 0000000000000..84c9a9703af6d --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.7.jar.sha1 @@ -0,0 +1 @@ +073d7b3086e14beb604ced229c302feff6449723 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 deleted file mode 100644 index a0814f495771f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 new file mode 100644 index 0000000000000..1de4404e7d5d0 --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 @@ -0,0 +1 @@ +e86dda4696d3c185fcc95d8d311904e7ce38a53f \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 deleted file mode 100644 index 101eb03b4b736..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 new file mode 100644 index 0000000000000..d4eeef6151272 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 @@ -0,0 +1 @@ +e446a17b175bfb733b87c5c2560ccb4e57d69f1a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 deleted file mode 100644 index 2d9e6a9d3cfd6..0000000000000 --- a/modules/lang-painless/licenses/asm-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-9.7.jar.sha1 new file mode 100644 index 0000000000000..84c9a9703af6d --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.7.jar.sha1 @@ -0,0 +1 @@ +073d7b3086e14beb604ced229c302feff6449723 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 deleted file mode 100644 index fa42ea1198165..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 new file mode 100644 index 0000000000000..c7687adfeb990 --- /dev/null +++ 
b/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 @@ -0,0 +1 @@ +e4a258b7eb96107106c0599f0061cfc1832fe07a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 deleted file mode 100644 index a0814f495771f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 new file mode 100644 index 0000000000000..1de4404e7d5d0 --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 @@ -0,0 +1 @@ +e86dda4696d3c185fcc95d8d311904e7ce38a53f \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 deleted file mode 100644 index 101eb03b4b736..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 new file mode 100644 index 0000000000000..d4eeef6151272 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 @@ -0,0 +1 @@ +e446a17b175bfb733b87c5c2560ccb4e57d69f1a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 deleted file mode 100644 index 1f42ac62dc69c..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 new file mode 100644 index 0000000000000..37c0d27efe46f --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 @@ -0,0 +1 @@ +c0655519f24d92af2202cb681cd7c1569df6ead6 \ No newline at end of file diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml index 478ca9ae8abf4..20e6fd351a4b9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml @@ -2,7 +2,7 @@ - do: scripts_painless_context: {} - match: { contexts.0: aggregation_selector} - - match: { contexts.23: update} + - match: { contexts.24: update} --- "Action to get all API values for score context": diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index b5f687ce34d4b..f55ad2e9d659c 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -47,6 +47,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import 
org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -68,6 +69,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryStringQueryBuilder; import org.opensearch.plugins.Plugin; import java.io.IOException; @@ -541,6 +543,31 @@ public void testMatchPhrase() throws IOException { } } + public void testNestedExistsQuery() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("nested_field"); + { + b.field("type", "search_as_you_type"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + Query actual = new QueryStringQueryBuilder("field:*").toQuery(queryShardContext); + Query expected = new ConstantScoreQuery( + new BooleanQuery.Builder().add(new NormsFieldExistsQuery("field.nested_field"), BooleanClause.Occur.SHOULD).build() + ); + assertEquals(expected, actual); + } + private static BooleanQuery buildBoolPrefixQuery(String shingleFieldName, String prefixFieldName, List terms) { final BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int i = 0; i < terms.size() - 1; i++) { diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java index 51a76903e284d..978c92870bd75 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java @@ -37,19 +37,27 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpHandlingSettings; +import org.opensearch.http.HttpServerTransport; import org.opensearch.http.netty4.Netty4HttpChannel; import org.opensearch.http.netty4.Netty4HttpServerTransport; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TransportAdapterProvider; import org.opensearch.transport.netty4.ssl.SslUtils; import javax.net.ssl.SSLEngine; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.DecoderException; import io.netty.handler.ssl.ApplicationProtocolNames; import io.netty.handler.ssl.ApplicationProtocolNegotiationHandler; @@ -59,9 +67,14 @@ * @see SecuritySSLNettyHttpServerTransport */ public class SecureNetty4HttpServerTransport extends Netty4HttpServerTransport { + public static final String REQUEST_HEADER_VERIFIER = 
"HeaderVerifier"; + public static final String REQUEST_DECOMPRESSOR = "RequestDecompressor"; + private static final Logger logger = LogManager.getLogger(SecureNetty4HttpServerTransport.class); - private final SecureTransportSettingsProvider secureTransportSettingsProvider; - private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + private final TransportExceptionHandler exceptionHandler; + private final ChannelInboundHandlerAdapter headerVerifier; + private final TransportAdapterProvider decompressorProvider; public SecureNetty4HttpServerTransport( final Settings settings, @@ -72,7 +85,7 @@ public SecureNetty4HttpServerTransport( final Dispatcher dispatcher, final ClusterSettings clusterSettings, final SharedGroupFactory sharedGroupFactory, - final SecureTransportSettingsProvider secureTransportSettingsProvider, + final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, final Tracer tracer ) { super( @@ -86,9 +99,45 @@ public SecureNetty4HttpServerTransport( sharedGroupFactory, tracer ); - this.secureTransportSettingsProvider = secureTransportSettingsProvider; - this.exceptionHandler = secureTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) - .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP); + + this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider; + this.exceptionHandler = secureHttpTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) + .orElse(TransportExceptionHandler.NOOP); + + final List headerVerifiers = secureHttpTransportSettingsProvider.getHttpTransportAdapterProviders( + settings + ) + .stream() + .filter(p -> REQUEST_HEADER_VERIFIER.equalsIgnoreCase(p.name())) + .map(p -> p.create(settings, this, ChannelInboundHandlerAdapter.class)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (headerVerifiers.size() > 1) { + throw new IllegalArgumentException("Cannot have more than one header verifier configured, supplied " + headerVerifiers.size()); + } + + final Optional> decompressorProviderOpt = secureHttpTransportSettingsProvider + .getHttpTransportAdapterProviders(settings) + .stream() + .filter(p -> REQUEST_DECOMPRESSOR.equalsIgnoreCase(p.name())) + .findFirst(); + // There could be multiple request decompressor providers configured, using the first one + decompressorProviderOpt.ifPresent(p -> logger.debug("Using request decompressor provider: {}", p)); + + this.headerVerifier = headerVerifiers.isEmpty() ? 
null : headerVerifiers.get(0); + this.decompressorProvider = decompressorProviderOpt.orElseGet(() -> new TransportAdapterProvider() { + @Override + public String name() { + return REQUEST_DECOMPRESSOR; + } + + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.empty(); + } + }); } @Override @@ -152,7 +201,7 @@ protected SslHttpChannelHandler(final Netty4HttpServerTransport transport, final protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - final SSLEngine sslEngine = secureTransportSettingsProvider.buildSecureHttpServerEngine( + final SSLEngine sslEngine = secureHttpTransportSettingsProvider.buildSecureHttpServerEngine( settings, SecureNetty4HttpServerTransport.this ).orElseGet(SslUtils::createDefaultServerSSLEngine); @@ -166,4 +215,17 @@ protected void configurePipeline(Channel ch) { ch.pipeline().addLast(new Http2OrHttpHandler()); } } + + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + if (headerVerifier != null) { + return headerVerifier; + } else { + return super.createHeaderVerifier(); + } + } + + @Override + protected ChannelInboundHandlerAdapter createDecompressor() { + return decompressorProvider.create(settings, this, ChannelInboundHandlerAdapter.class).orElseGet(super::createDecompressor); + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index 56163c18949a4..e2c84ab5d339a 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -49,6 +49,7 @@ import org.opensearch.http.netty4.ssl.SecureNetty4HttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; @@ -160,7 +161,7 @@ public Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, Tracer tracer ) { return Collections.singletonMap( @@ -174,7 +175,7 @@ public Map> getSecureHttpTransports( dispatcher, clusterSettings, getSharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, tracer ) ); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java index 9c63a1ab9161b..977121346dcc3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import 
org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; @@ -72,7 +73,7 @@ public class SecureNetty4Transport extends Netty4Transport { private static final Logger logger = LogManager.getLogger(SecureNetty4Transport.class); private final SecureTransportSettingsProvider secureTransportSettingsProvider; - private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + private final TransportExceptionHandler exceptionHandler; public SecureNetty4Transport( final Settings settings, @@ -100,7 +101,7 @@ public SecureNetty4Transport( this.secureTransportSettingsProvider = secureTransportSettingsProvider; this.exceptionHandler = secureTransportSettingsProvider.buildServerTransportExceptionHandler(settings, this) - .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP); + .orElse(TransportExceptionHandler.NOOP); } @Override diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java new file mode 100644 index 0000000000000..1ab1ae4f5ddfd --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java @@ -0,0 +1,242 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.netty4.ssl; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.NullDispatcher; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TransportAdapterProvider; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import io.netty.channel.ChannelInboundHandlerAdapter; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests for the {@link SecureNetty4HttpServerTransport} class. 
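+ * Covers the {@code RequestHeaderVerifier} and {@code RequestDecompressor} adapter points: providers are matched by name, at most one header verifier may be configured, and a no-op decompressor provider is substituted when none is supplied.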
+ */ +public class SecureNetty4HttpServerTransportConfigurationTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + private static class ConfigurableSecureHttpTransportSettingsProvider implements SecureHttpTransportSettingsProvider { + private final List<TransportAdapterProvider<HttpServerTransport>> transportAdapterProviders; + + public ConfigurableSecureHttpTransportSettingsProvider( + List<TransportAdapterProvider<HttpServerTransport>> transportAdapterProviders + ) { + this.transportAdapterProviders = transportAdapterProviders; + } + + @Override + public Collection<TransportAdapterProvider<HttpServerTransport>> getHttpTransportAdapterProviders(Settings settings) { + return transportAdapterProviders; + } + + @Override + public Optional<TransportExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + return Optional.empty(); + } + } + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + public void testRequestHeaderVerifier() throws InterruptedException { + final TransportAdapterProvider<HttpServerTransport> transportAdapterProvider = new TransportAdapterProvider<HttpServerTransport>() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public <C> Optional<C> create(Settings settings, HttpServerTransport transport, Class<C> adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ) { + + } + } + + public void testMultipleRequestHeaderVerifiers() throws InterruptedException { + final TransportAdapterProvider<HttpServerTransport> transportAdapterProvider = new TransportAdapterProvider<HttpServerTransport>() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public <C> Optional<C> create(Settings settings, HttpServerTransport transport, Class<C> adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider, transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ); + + assertThat(ex.getMessage(), equalTo("Cannot have more than one header verifier configured,
supplied 2")); + } + + public void testRequestDecompressor() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_DECOMPRESSOR; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ) { + + } + } + + public void testRequestDecompressorAndRequestHeaderVerifier() throws InterruptedException { + final TransportAdapterProvider requestDecompressor = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_DECOMPRESSOR; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + final TransportAdapterProvider requestHeaderVerifier = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(requestDecompressor, requestHeaderVerifier)), + NoopTracer.INSTANCE + ) + ) { + + } + } +} diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java index 9ea49d0b24d44..e79a066ad8f63 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java @@ -29,7 +29,8 @@ import org.opensearch.http.HttpTransportSettings; import org.opensearch.http.NullDispatcher; import org.opensearch.http.netty4.Netty4HttpClient; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; @@ -40,7 +41,6 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NettyAllocator; import org.opensearch.transport.SharedGroupFactory; -import org.opensearch.transport.TcpTransport; import org.opensearch.transport.netty4.ssl.TrustAllManager; import org.junit.After; import 
org.junit.Before; @@ -83,7 +83,6 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.ssl.ClientAuth; import io.netty.handler.ssl.SslContextBuilder; import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; @@ -104,7 +103,7 @@ public class SecureNetty4HttpServerTransportTests extends OpenSearchTestCase { private ThreadPool threadPool; private MockBigArrays bigArrays; private ClusterSettings clusterSettings; - private SecureTransportSettingsProvider secureTransportSettingsProvider; + private SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; @Before public void setup() throws Exception { @@ -113,14 +112,9 @@ public void setup() throws Exception { bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - secureTransportSettingsProvider = new SecureTransportSettingsProvider() { + secureHttpTransportSettingsProvider = new SecureHttpTransportSettingsProvider() { @Override - public Optional<ServerExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { - return Optional.empty(); - } - - @Override - public Optional<ServerExceptionHandler> buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { + public Optional<TransportExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { return Optional.empty(); } @@ -146,22 +140,6 @@ public Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpSe throw new SSLException(ex); } } - - @Override - public Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { - return Optional.empty(); - } - - @Override - public Optional<SSLEngine> buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { - return Optional.of( - SslContextBuilder.forClient() - .clientAuth(ClientAuth.NONE) - .trustManager(TrustAllManager.INSTANCE) - .build() - .newEngine(NettyAllocator.getAllocator()) - ); - } }; } @@ -241,7 +219,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, dispatcher, clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -292,7 +270,7 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(Settings.EMPTY), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -312,7 +290,7 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -366,7 +344,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -430,7 +408,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(Settings.EMPTY), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -487,7 +465,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -562,7 +540,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java index 0cae58b8efa2a..df3b005f40903 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java @@ -20,8 +20,8 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.http.HttpServerTransport; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; @@ -69,40 +69,12 @@ protected Transport build(Settings settings, final Version version, ClusterSetti NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); final SecureTransportSettingsProvider secureTransportSettingsProvider = new SecureTransportSettingsProvider() { @Override - public Optional<ServerExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + public Optional<TransportExceptionHandler> buildServerTransportExceptionHandler(Settings settings, Transport transport) { return Optional.empty(); } @Override - public Optional<ServerExceptionHandler> buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { - return Optional.empty(); - } - - @Override - public Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { - try { - final KeyStore keyStore = KeyStore.getInstance("PKCS12"); - keyStore.load( - SimpleSecureNetty4TransportTests.class.getResourceAsStream("/netty4-secure.jks"), - "password".toCharArray() - ); - - final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); - keyManagerFactory.init(keyStore, "password".toCharArray()); - - SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) - .trustManager(TrustAllManager.INSTANCE) - .build() - .newEngine(NettyAllocator.getAllocator()); - return Optional.of(engine); - } catch (final IOException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException - | CertificateException ex) { - throw new SSLException(ex); - } - } - - @Override - public Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { + public Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, Transport transport) throws SSLException { try { final KeyStore keyStore = KeyStore.getInstance("PKCS12"); keyStore.load( diff --git a/plugins/repository-azure/build.gradle
b/plugins/repository-azure/build.gradle index 31db767b2c68e..c7836170d658f 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -45,7 +45,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.47.0' - api 'com.azure:azure-json:1.0.1' + api 'com.azure:azure-json:1.1.0' api 'com.azure:azure-storage-common:12.21.2' api 'com.azure:azure-core-http-netty:1.12.8' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 b/plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 deleted file mode 100644 index 128a82717fef9..0000000000000 --- a/plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abdfdb0c49eebe75ed8532d047dea0c9f13c30ac \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 b/plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 new file mode 100644 index 0000000000000..e44ee47c40253 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 @@ -0,0 +1 @@ +1f21cea72f54a6af3b0bb6831eb3874bd4afd213 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index d823de7ffadd4..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..93207338f7db8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 deleted file mode 100644 index 114d77a1bb95f..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b7070e9acfe262bb0bd936c4051116631796b3b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..e850aad5f3656 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +3ad0af28e408092f0d12994802a9f3fe18d45f8c \ 
No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5a4bde479eb38..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebc495e9b2bc2c9ab60a264b40f62dc0671d9f6e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d4ae1b7e71661 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +62b6a5dfee2e22ab9015a469cb68e4727596fd4c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index a62cb0fefcc40..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..8d299e265646d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 0ddcf0f6dddca..1dfc64e19601c 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -60,7 +60,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.33.0' + api 'com.google.api.grpc:proto-google-common-protos:2.37.1' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" @@ -149,6 +149,9 @@ thirdPartyAudit { 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', 'com.google.auth.oauth2.GdchCredentials', + 'com.google.protobuf.MapFieldBuilder', + 'com.google.protobuf.MapFieldBuilder$Converter', + 'com.google.protobuf.MapFieldReflectionAccessor', 'com.google.protobuf.util.JsonFormat', 'com.google.protobuf.util.JsonFormat$Parser', 
'com.google.protobuf.util.JsonFormat$Printer', diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 deleted file mode 100644 index 746e4e99fd881..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -644e11df1cec6d38a63a9a06a701e48c398b87d0 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 new file mode 100644 index 0000000000000..92f991778ccc3 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 @@ -0,0 +1 @@ +3b8759ef0468cced72f8f0d4fc3cc57aeb8139f8 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 36843e3bc8700..6faf0383d3ba2 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -74,8 +74,8 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" - api 'org.apache.commons:commons-configuration2:2.9.0' - api 'commons-io:commons-io:2.15.1' + api 'org.apache.commons:commons-configuration2:2.10.1' + api 'commons-io:commons-io:2.16.0' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 new file mode 100644 index 0000000000000..d4c0f8417d357 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 @@ -0,0 +1 @@ +2b681b3bcddeaa5bf5c2a2939cd77e2f9ad6efda \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 deleted file mode 100644 index 086c769fe600c..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adc3ee6b84fc62a6e75e901d080adacb72aac61e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 deleted file mode 100644 index 47c5d13812a36..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..6a7b638719fa3 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 @@ -0,0 +1 @@ +27875a7935f1ddcc13267eb6fae1f719e0409572 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 deleted file mode 100644 index 0e3595fecb0d2..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3019703b67413ef3d6150da1f49753f4010507ce \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 
b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..5f0eed9c5d7e4 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ad97680373f9c9f278f597ad6552d44e20418929 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 
bbe91c6ccfb1d..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 deleted file mode 100644 index 6b9a35acb2c20..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9234407d6a46745599735765c4d3755c7fc84162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..6ed00ff79dea9 --- 
/dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +baf7b939ef71b25713cacbe47bef8caf80ce99c6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 
bbe91c6ccfb1d..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index d823de7ffadd4..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..93207338f7db8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index a62cb0fefcc40..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..8d299e265646d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 
19419999300dd..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-2.13.0.md b/release-notes/opensearch.release-notes-2.13.0.md new file mode 100644 index 0000000000000..e55c22d6b851d --- /dev/null +++ b/release-notes/opensearch.release-notes-2.13.0.md @@ -0,0 +1,72 @@ +## 2024-03-21 Version 2.13.0 Release Notes + +## [2.13.0] +### Added +- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) +- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268)) +- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) +- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) +- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) +- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) +- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533)) +- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) +- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) +- [Admission Control] Integrate IO Usage
Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) +- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) +- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- [Metrics Framework] Adds support for asynchronous gauge metric type. ([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642)) +- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625)) +- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601)) +- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542)) +- [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709)) +- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) +- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) +- Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) +- Lightweight Transport action to verify local term before fetching cluster-state from remote ([#12252](https://github.com/opensearch-project/OpenSearch/pull/12252/)) +- Integrate with admission controller for cluster-manager Read API. 
([#12496](https://github.com/opensearch-project/OpenSearch/pull/12496)) + +### Dependencies +- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) +- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326)) +- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326)) +- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367)) +- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) +- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388), [#12618](https://github.com/opensearch-project/OpenSearch/pull/12618)) +- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587)) +- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445)) +- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461)) +- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462)) +- Bump `lycheeverse/lychee-action` from 1.9.1 to 1.9.3 ([#12521](https://github.com/opensearch-project/OpenSearch/pull/12521)) +- Bump `com.azure:azure-core` from 1.39.0 to 1.47.0 ([#12520](https://github.com/opensearch-project/OpenSearch/pull/12520)) +- Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519)) +- Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) +- Bump Jackson version from 2.16.1 to 2.17.0 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611), [#12662](https://github.com/opensearch-project/OpenSearch/pull/12662)) +- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) + +### Changed +- Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) +- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) +- Mark fuzzy filter GA and remove experimental 
setting ([#12631](https://github.com/opensearch-project/OpenSearch/pull/12631))
+- Keep the election scheduler open until cluster state has been applied ([#11699](https://github.com/opensearch-project/OpenSearch/pull/11699))
+
+### Fixed
+- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035))
+- Add support for a special WrappingSearchAsyncActionPhase so that onPhaseStart() is always followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293))
+- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298))
+- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481))
+- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919))
+- Prevent unnecessary fetch sub phase processor initialization during fetch phase execution ([#12503](https://github.com/opensearch-project/OpenSearch/pull/12503))
+- Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499))
+- Fix get task API not refreshing resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531))
+- Fix deserialization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679))
+- Fix JVM crash when onShardResult and onShardFailure are executed concurrently on one shard ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158))
+- Avoid overflow when sorting missing last on `epoch_millis` datetime field ([#12676](https://github.com/opensearch-project/OpenSearch/pull/12676))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
index 29fbf55417961..989ea6b93f47f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
@@ -1,13 +1,108 @@
 "Help":
   - skip:
-      version: " - 2.11.99"
+      version: " - 2.13.99"
+      reason: search idle reactivate count total is only added in 3.0.0
+      features: node_selector
+  - do:
+      cat.shards:
+        help: true
+      node_selector:
+        version: "2.14.0 - "
+
+  - match:
+      $body: |
+        /^ index .+ \n
+        shard .+ \n
+        prirep .+ \n
+        state .+ \n
+        docs .+ \n
+        store .+ \n
+        ip .+ \n
+        id .+ \n
+        node .+ \n
+        sync_id .+ \n
+        unassigned.reason .+ \n
+        unassigned.at .+ \n
+        unassigned.for .+ \n
+        unassigned.details .+ \n
+        recoverysource.type .+ \n
+        completion.size .+ \n
+        fielddata.memory_size .+ \n
+        fielddata.evictions .+ \n
+        query_cache.memory_size .+ \n
+        query_cache.evictions .+ \n
+        flush.total .+ \n
+        flush.total_time .+ \n
+        get.current .+ \n
+        get.time .+ \n
+        get.total .+ \n
+        get.exists_time .+ \n
+        get.exists_total .+ \n
+        get.missing_time .+ \n
+        get.missing_total .+ \n
+        indexing.delete_current .+ \n
+        indexing.delete_time .+ \n
+        indexing.delete_total .+ \n
+        indexing.index_current .+ \n
+        indexing.index_time .+ \n
+        indexing.index_total .+ \n
+        indexing.index_failed .+ \n
+        merges.current .+ \n
+        merges.current_docs .+ \n
+        merges.current_size .+ \n
+        merges.total .+ \n
+
merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.concurrent_query_current .+ \n + search.concurrent_query_time .+ \n + search.concurrent_query_total .+ \n + search.concurrent_avg_slice_count .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n + search.search_idle_reactivate_count_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + docs.deleted .+ \n + $/ +--- +"Help from 2.12.0 to 2.13.99": + - skip: + version: " - 2.11.99 , 2.14.0 - " reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.12.0 - " + version: "2.12.0 - 2.13.99" - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index fa48820a71a89..07df09225c624 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -113,12 +113,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml index b298575d15410..c9c1558797a35 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the clone operation will run in background. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml index 2caf604eb4296..b93c75f6819c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the open operation will run in background. 
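+# Note: "skip" version ranges in these REST tests are inclusive, so a range of
+# " - 2.6.99" skips every version up to and including 2.6.99; the test below
+# therefore runs only on 2.7.0 and later, where wait_for_completion exists.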
- skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 426729e737978..67b5be7eb0fd5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -72,12 +72,12 @@ setup: - match: { _id: "1" } - match: { _source: { foo: "hello world" } } +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml index f7568b1446967..53df9f61700cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the shrink operation will run in background. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 50c2819eac9d5..096a61a765288 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -219,12 +219,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml index 2ce4fc620742a..9d56cc0800b09 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the split operation will run in background. 
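+# With wait_for_completion=false the request returns a task immediately; the
+# returned task id can then be polled through the tasks API until the
+# background split operation finishes.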
 - skip:
-    version: " - 2.99.99"
-    reason: "only available in 3.0+"
+    version: " - 2.6.99"
+    reason: "wait_for_completion was introduced in 2.7.0"
     features: allowed_warnings

 - do:
diff --git a/server/build.gradle b/server/build.gradle
index e36498bf1038b..7d52849844aaa 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -36,6 +36,7 @@ plugins {
   id('opensearch.publish')
   id('opensearch.internal-cluster-test')
   id('opensearch.optional-dependencies')
+  id('me.champeau.gradle.japicmp') version '0.4.2'
 }

 publishing {
@@ -141,7 +142,7 @@ tasks.withType(JavaCompile).configureEach {
 compileJava {
   options.compilerArgs += ['-processor', ['org.apache.logging.log4j.core.config.plugins.processor.PluginProcessor',
-    'org.opensearch.common.annotation.processor.ApiAnnotationProcessor'].join(','), '-AcontinueOnFailingChecks']
+    'org.opensearch.common.annotation.processor.ApiAnnotationProcessor'].join(',')]
 }

 tasks.named("internalClusterTest").configure {
@@ -378,3 +379,81 @@ tasks.named("sourcesJar").configure {
     duplicatesStrategy = DuplicatesStrategy.EXCLUDE
   }
 }
+
+/** Compares the current build against a snapshot build */
+tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) {
+  oldClasspath.from(files("${buildDir}/snapshot/opensearch-${version}.jar"))
+  newClasspath.from(tasks.named('jar'))
+  onlyModified = true
+  failOnModification = true
+  ignoreMissingClasses = true
+  annotationIncludes = ['@org.opensearch.common.annotation.PublicApi']
+  txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt")
+  htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html")
+  dependsOn downloadSnapshot
+}
+
+/** If the Java API comparison task failed, print a hint about whether the change needs to be merged from its target branch */
+gradle.taskGraph.afterTask { Task task, TaskState state ->
+  if (task.name == 'japicmp' && state.failure != null) {
+    def sha = getGitShaFromJar("${buildDir}/snapshot/opensearch-${version}.jar")
+    logger.info("Incompatible Java API from snapshot jar built off of commit ${sha}")
+
+    if (!inHistory(sha)) {
+      logger.warn('\u001B[33mPlease merge from the target branch and run this task again.\u001B[0m')
+    }
+  }
+}
+
+/** Downloads the latest snapshot from the Maven snapshot repository */
+tasks.register("downloadSnapshot", Copy) {
+  def mavenSnapshotRepoUrl = "https://aws.oss.sonatype.org/content/repositories/snapshots/"
+  def groupId = "org.opensearch"
+  def artifactId = "opensearch"
+
+  repositories {
+    maven {
+      url mavenSnapshotRepoUrl
+    }
+  }
+
+  configurations {
+    snapshotArtifact
+  }
+
+  dependencies {
+    snapshotArtifact("${groupId}:${artifactId}:${version}:")
+  }
+
+  from configurations.snapshotArtifact
+  into "$buildDir/snapshot"
+}
+
+/** Check if the sha is in the current history */
+def inHistory(String sha) {
+  try {
+    def commandCheckSha = "git merge-base --is-ancestor ${sha} HEAD"
+    def process = commandCheckSha.execute()
+    process.waitFor()
+    // git signals ancestry through its exit status: 0 means the sha is an ancestor of HEAD
+    return process.exitValue() == 0
+  } catch (Exception e) {
+    return false
+  }
+}
+
+/** Extracts the Git SHA used to build a jar from its manifest */
+def getGitShaFromJar(String jarPath) {
+  def sha = ''
+  try {
+    // Open the JAR file
+    def jarFile = new java.util.jar.JarFile(jarPath)
+    // Get the manifest from the JAR file
+    def manifest = jarFile.manifest
+    def attributes = manifest.mainAttributes
+    // The Git SHA is stored in the manifest under the 'Change' attribute
+    sha = attributes.getValue('Change')
+    jarFile.close()
+  } catch (IOException e) {
+    println "Failed to read the JAR file: $e.message"
+  }
+  return sha
+}
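Illustrative aside: a minimal standalone Java sketch of the ancestry check that the Groovy inHistory helper above performs. It assumes git is available on the PATH; the class and method names are hypothetical and not part of the build.

import java.io.IOException;

public final class GitAncestryCheck {

    // `git merge-base --is-ancestor <sha> HEAD` reports ancestry through its
    // exit code: 0 when <sha> is an ancestor of the current HEAD, non-zero
    // otherwise, so the exit status has to be inspected explicitly.
    static boolean inHistory(String sha) {
        try {
            Process process = new ProcessBuilder("git", "merge-base", "--is-ancestor", sha, "HEAD")
                .redirectErrorStream(true)
                .start();
            return process.waitFor() == 0;
        } catch (IOException e) {
            return false; // git missing or the process could not be started
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(inHistory(args[0]) ? "in history" : "not in history");
    }
}

diff --git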
a/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/term/FetchByTermVersionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/term/FetchByTermVersionIT.java
new file mode 100644
index 0000000000000..72dcc98dcdc12
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/term/FetchByTermVersionIT.java
@@ -0,0 +1,159 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.support.clustermanager.term;
+
+import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
+import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
+import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.coordination.ClusterStateTermVersion;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.mapper.MapperService;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.transport.MockTransportService;
+import org.opensearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.Matchers.is;
+
+@SuppressWarnings("unchecked")
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class FetchByTermVersionIT extends OpenSearchIntegTestCase {
+
+    AtomicBoolean isTermVersionCheckEnabled = new AtomicBoolean();
+
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(MockTransportService.TestPlugin.class);
+    }
+
+    AtomicBoolean forceFetchFromCM = new AtomicBoolean();
+
+    public void testClusterStateResponseFromDataNode() throws Exception {
+        String cm = internalCluster().startClusterManagerOnlyNode();
+        List<String> dns = internalCluster().startDataOnlyNodes(5);
+        int numberOfShards = dns.size();
+        stubClusterTermResponse(cm);
+
+        ensureClusterSizeConsistency();
+        ensureGreen();
+
+        List<String> indices = new ArrayList<>();
+
+        // Create a large sized cluster-state by creating field mappings
+        IntStream.range(0, 20).forEachOrdered(n -> {
+            String index = "index_" + n;
+            createIndex(
+                index,
+                Settings.builder()
+                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
+                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                    .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE)
+                    .build()
+            );
+            indices.add(index);
+        });
+        IntStream.range(0, 5).forEachOrdered(n -> {
+            List<String> mappings = new ArrayList<>();
+            for (int i = 0; i < 2000; i++) {
+                mappings.add("t-123456789-123456789-" + n + "-" + i);
+                mappings.add("type=keyword");
+            }
+            PutMappingRequest request = new PutMappingRequest().source(mappings.toArray(new String[0]))
+                .indices(indices.toArray(new String[0]));
+            internalCluster().dataNodeClient().admin().indices().putMapping(request).actionGet();
+        });
+        ensureGreen();
+
+        ClusterStateResponse stateResponseM = internalCluster().clusterManagerClient()
+            .admin()
+            .cluster()
+            .state(new ClusterStateRequest())
+            .actionGet();
+
+        waitUntil(() -> {
+            ClusterStateResponse stateResponseD = internalCluster().dataNodeClient()
+                .admin()
+                .cluster()
+                .state(new ClusterStateRequest())
+                .actionGet();
+            return stateResponseD.getState().stateUUID().equals(stateResponseM.getState().stateUUID());
+        });
+        // cluster state response time with term check enabled on datanode
+        isTermVersionCheckEnabled.set(true);
+        {
+            List<Long> latencies = new ArrayList<>();
+            IntStream.range(0, 50).forEachOrdered(n1 -> {
+                ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+                long start = System.currentTimeMillis();
+                ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(clusterStateRequest).actionGet();
+                latencies.add(System.currentTimeMillis() - start);
+                assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName()));
+                assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length));
+                assertThat(stateResponse.getState().metadata().indices().size(), is(indices.size()));
+                Map<String, ?> fieldMappings = (Map<String, ?>) stateResponse.getState()
+                    .metadata()
+                    .index(indices.get(0))
+                    .mapping()
+                    .sourceAsMap()
+                    .get("properties");
+
+                assertThat(fieldMappings.size(), is(10000));
+            });
+            Collections.sort(latencies);
+
+            logger.info("cluster().state() fetch with Term Version enabled took {} milliseconds", (latencies.get(latencies.size() / 2)));
+        }
+        // cluster state response time with term check disabled on datanode
+        isTermVersionCheckEnabled.set(false);
+        {
+            List<Long> latencies = new ArrayList<>();
+            IntStream.range(0, 50).forEachOrdered(n1 -> {
+                ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+                long start = System.currentTimeMillis();
+                ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(clusterStateRequest).actionGet();
+                latencies.add(System.currentTimeMillis() - start);
+                assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName()));
+                assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length));
+                assertThat(stateResponse.getState().metadata().indices().size(), is(indices.size()));
+                Map<String, ?> typeProperties = (Map<String, ?>) stateResponse.getState()
+                    .metadata()
+                    .index(indices.get(0))
+                    .mapping()
+                    .sourceAsMap()
+                    .get("properties");
+                assertThat(typeProperties.size(), is(10000));
+
+            });
+            Collections.sort(latencies);
+            logger.info("cluster().state() fetch with Term Version disabled took {} milliseconds", (latencies.get(latencies.size() / 2)));
+        }
+
+    }
+
+    private void stubClusterTermResponse(String master) {
+        MockTransportService primaryService = (MockTransportService) internalCluster().getInstance(TransportService.class, master);
+        primaryService.addRequestHandlingBehavior(GetTermVersionAction.NAME, (handler, request, channel, task) -> {
+            if (isTermVersionCheckEnabled.get()) {
+                handler.messageReceived(request, channel, task);
+            } else {
+                // always return response that does not match
+                channel.sendResponse(new GetTermVersionResponse(new ClusterStateTermVersion(new ClusterName("test"), "1", -1, -1)));
+            }
+        });
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
index c394a1f631690..f97950f2652a3 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
@@ -84,6 +84,7 @@
 import org.opensearch.index.translog.TestTranslog;
 import
org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; @@ -711,9 +712,11 @@ public static final IndexShard newIndexShard( SegmentReplicationCheckpointPublisher.EMPTY, null, null, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, nodeId, - null + null, + DefaultRemoteStoreSettings.INSTANCE, + false, + IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 30edea6551067..669e24f9fb555 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -31,6 +31,9 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -58,6 +61,20 @@ public void enablePreferPrimaryBalance() { ); } + public void setAllocationRelocationStrategy(boolean preferPrimaryBalance, boolean preferPrimaryRebalance, float buffer) { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance) + .put(PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance) + .put(PRIMARY_SHARD_REBALANCE_BUFFER.getKey(), buffer) + ) + ); + } + /** * This test verifies that the overall primary balance is attained during allocation. This test verifies primary * balance per index and across all indices is maintained. @@ -87,7 +104,7 @@ public void testGlobalPrimaryAllocation() throws Exception { state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); - verifyPrimaryBalance(); + verifyPrimaryBalance(0.0f); } /** @@ -224,6 +241,70 @@ public void testAllocationWithDisruption() throws Exception { verifyPerIndexPrimaryBalance(); } + /** + * Similar to testSingleIndexShardAllocation test but creates multiple indices, multiple nodes adding in and getting + * removed. 
The test asserts post each such event that primary shard distribution is balanced for each index as well as across the nodes + * when the PREFER_PRIMARY_SHARD_REBALANCE is set to true + */ + public void testAllocationAndRebalanceWithDisruption() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 2; + final int maxShardCount = 2; + // Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in + // and preventing primary relocations + final int nodeCount = randomIntBetween(5, 10); + final int numberOfIndices = randomIntBetween(1, 10); + final float buffer = randomIntBetween(1, 4) * 0.10f; + + logger.info("--> Creating {} nodes", nodeCount); + final List nodeNames = new ArrayList<>(); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + setAllocationRelocationStrategy(true, true, buffer); + + int shardCount, replicaCount; + ClusterState state; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(1, maxReplicaCount); + logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount); + createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); + ensureGreen(TimeValue.timeValueSeconds(60)); + if (logger.isTraceEnabled()) { + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + } + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + + final int additionalNodeCount = randomIntBetween(1, 5); + logger.info("--> Adding {} nodes", additionalNodeCount); + + internalCluster().startNodes(additionalNodeCount); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + + int nodeCountToStop = additionalNodeCount; + while (nodeCountToStop > 0) { + internalCluster().stopRandomDataNode(); + // give replica a chance to promote as primary before terminating node containing the replica + ensureGreen(TimeValue.timeValueSeconds(60)); + nodeCountToStop--; + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info("--> Cluster state post nodes stop {}", state); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + } + /** * Utility method which ensures cluster has balanced primary shard distribution across a single index. 
* @throws Exception exception @@ -263,7 +344,7 @@ private void verifyPerIndexPrimaryBalance() throws Exception { }, 60, TimeUnit.SECONDS); } - private void verifyPrimaryBalance() throws Exception { + private void verifyPrimaryBalance(float buffer) throws Exception { assertBusy(() -> { final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState(); RoutingNodes nodes = currentState.getRoutingNodes(); @@ -278,7 +359,7 @@ private void verifyPrimaryBalance() throws Exception { .filter(ShardRouting::primary) .collect(Collectors.toList()) .size(); - assertTrue(primaryCount <= avgPrimaryShardsPerNode); + assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer))); } }, 60, TimeUnit.SECONDS); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index f2cb7c9c6bfc8..d2f1e6313db07 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -89,8 +89,8 @@ public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Ex Index index = resolveIndex(INDEX_NAME); Index anotherIndex = resolveIndex(ANOTHER_INDEX); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNode); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); - assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), true); + assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabledOrRemoteNode(), false); + assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabledOrRemoteNode(), true); } public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Exception { @@ -119,8 +119,8 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex Index index = resolveIndex(INDEX_NAME); Index anotherIndex = resolveIndex(ANOTHER_INDEX); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNode); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), true); - assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); + assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabledOrRemoteNode(), true); + assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabledOrRemoteNode(), false); } public void testReplicationTypesOverrideNotAllowed_IndexAPI() { diff --git a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java new file mode 100644 index 0000000000000..4d1964326820e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java @@ -0,0 +1,198 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.node.IoUsageStats; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.rest.AbstractRestChannel; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.admin.indices.RestGetAliasesAction; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AdmissionForClusterManagerIT extends OpenSearchIntegTestCase { + + private static final Logger LOGGER = LogManager.getLogger(AdmissionForClusterManagerIT.class); + + public static final String INDEX_NAME = "test_index"; + + private String clusterManagerNodeId; + private String datanode; + private ResourceUsageCollectorService cMResourceCollector; + + private static final Settings DISABLE_ADMISSION_CONTROL = Settings.builder() + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .build(); + + private static final Settings ENFORCE_ADMISSION_CONTROL = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 50) + .build(); + + @Before + public void init() { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(DISABLE_ADMISSION_CONTROL).build() + ); + datanode = internalCluster().startDataOnlyNode(Settings.builder().put(DISABLE_ADMISSION_CONTROL).build()); + + ensureClusterSizeConsistency(); + ensureGreen(); + + // Disable the automatic resource collection + clusterManagerNodeId = internalCluster().clusterService(clusterManagerNode).localNode().getId(); + cMResourceCollector = internalCluster().getClusterManagerNodeInstance(ResourceUsageCollectorService.class); + cMResourceCollector.stop(); + + // 
Enable admission control + client().admin().cluster().prepareUpdateSettings().setTransientSettings(ENFORCE_ADMISSION_CONTROL).execute().actionGet(); + } + + public void testAdmissionControlEnforced() throws Exception { + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 99, new IoUsageStats(98)); + + // Write API on ClusterManager + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}")); + + // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); + aliasesRequest.aliases("alias1"); + try { + dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + fail("expected failure"); + } catch (Exception e) { + assertTrue(e instanceof OpenSearchRejectedExecutionException); + assertTrue(e.getMessage().contains("CPU usage admission controller rejected the request")); + assertTrue(e.getMessage().contains("[indices:admin/aliases/get]")); + assertTrue(e.getMessage().contains("action-type [CLUSTER_ADMIN]")); + } + + client().admin().cluster().prepareUpdateSettings().setTransientSettings(DISABLE_ADMISSION_CONTROL).execute().actionGet(); + GetAliasesResponse getAliasesResponse = dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); + + AdmissionControlService admissionControlServiceCM = internalCluster().getClusterManagerNodeInstance(AdmissionControlService.class); + + AdmissionControllerStats admissionStats = getAdmissionControlStats(admissionControlServiceCM).get( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER + ); + + assertEquals(admissionStats.rejectionCount.get(AdmissionControlActionType.CLUSTER_ADMIN.getType()).longValue(), 1); + assertNull(admissionStats.rejectionCount.get(AdmissionControlActionType.SEARCH.getType())); + assertNull(admissionStats.rejectionCount.get(AdmissionControlActionType.INDEXING.getType())); + } + + public void testAdmissionControlEnabledOnNoBreach() throws InterruptedException { + // CPU usage is less than threshold 50% + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 35, new IoUsageStats(98)); + + // Write API on ClusterManager + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}").execute().actionGet()); + + // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); + aliasesRequest.aliases("alias1"); + GetAliasesResponse getAliasesResponse = dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); + } + + public void testAdmissionControlMonitorOnBreach() throws InterruptedException { + admissionControlDisabledOnBreach( + Settings.builder().put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()).build() + ); + } + + public void testAdmissionControlDisabledOnBreach() throws InterruptedException { + admissionControlDisabledOnBreach(DISABLE_ADMISSION_CONTROL); + } + + public void admissionControlDisabledOnBreach(Settings admission) throws InterruptedException { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(admission).execute().actionGet(); + + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 97, new IoUsageStats(98)); + + // Write API on ClusterManager + 
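+        // 97% CPU usage is well above the 50% CLUSTER_ADMIN limit configured in
+        // ENFORCE_ADMISSION_CONTROL, so the write and read calls below are only
+        // expected to succeed because admission control is disabled or in
+        // monitor mode in this scenario.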
assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}").execute().actionGet()); + + // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); + aliasesRequest.aliases("alias1"); + GetAliasesResponse getAliasesResponse = dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); + + } + + public void testAdmissionControlResponseStatus() throws Exception { + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 99, new IoUsageStats(98)); + + // Write API on ClusterManager + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}")); + + // Read API on ClusterManager + FakeRestRequest aliasesRequest = new FakeRestRequest(); + aliasesRequest.params().put("name", "alias1"); + CountDownLatch waitForResponse = new CountDownLatch(1); + AtomicReference aliasResponse = new AtomicReference<>(); + AbstractRestChannel channel = new AbstractRestChannel(aliasesRequest, true) { + + @Override + public void sendResponse(RestResponse response) { + waitForResponse.countDown(); + aliasResponse.set(response); + } + }; + + RestGetAliasesAction restHandler = internalCluster().getInstance(RestGetAliasesAction.class, datanode); + restHandler.handleRequest(aliasesRequest, channel, internalCluster().getInstance(NodeClient.class, datanode)); + + waitForResponse.await(); + assertEquals(RestStatus.TOO_MANY_REQUESTS, aliasResponse.get().status()); + } + + @Override + public void tearDown() throws Exception { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(DISABLE_ADMISSION_CONTROL).execute().actionGet(); + super.tearDown(); + } + + Map getAdmissionControlStats(AdmissionControlService admissionControlService) { + Map acStats = new HashMap<>(); + for (AdmissionControllerStats admissionControllerStats : admissionControlService.stats().getAdmissionControllerStatsList()) { + acStats.put(admissionControllerStats.getAdmissionControllerName(), admissionControllerStats); + } + return acStats; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 88d6f6897ee68..0c35f91121059 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -8,13 +8,31 @@ package org.opensearch.remotemigration; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; +import java.util.List; +import 
java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class MigrationBaseTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; @@ -22,8 +40,16 @@ public class MigrationBaseTestCase extends OpenSearchIntegTestCase { protected Path segmentRepoPath; protected Path translogRepoPath; - boolean addRemote = false; + Settings extraSettings = Settings.EMPTY; + + private final List documentKeys = List.of( + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5) + ); protected Settings nodeSettings(int nodeOrdinal) { if (segmentRepoPath == null || translogRepoPath == null) { @@ -34,12 +60,12 @@ protected Settings nodeSettings(int nodeOrdinal) { logger.info("Adding remote store node"); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) + .put(extraSettings) .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) - .put("discovery.initial_state_timeout", "500ms") .build(); } else { logger.info("Adding docrep node"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("discovery.initial_state_timeout", "500ms").build(); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); } } @@ -47,4 +73,88 @@ protected Settings nodeSettings(int nodeOrdinal) { protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); } + + protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); + GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); + RepositoryMetadata rmd = res.repositories().get(0); + Settings.Builder settings = Settings.builder() + .put("location", rmd.settings().get("location")) + .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); + assertAcked( + client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() + ); + } + + public void initDocRepToRemoteMigration() { + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + ) + .get() + .isAcknowledged() + ); + } + + public BulkResponse indexBulk(String indexName, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + final IndexRequest request = client().prepareIndex(indexName) + .setId(UUIDs.randomBase64UUID()) + .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)) + .request(); + bulkRequest.add(request); + } + return 
client().bulk(bulkRequest).actionGet(); + } + + private void indexSingleDoc(String indexName) { + IndexResponse indexResponse = client().prepareIndex(indexName).setId("id").setSource("field", "value").get(); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + DeleteResponse deleteResponse = client().prepareDelete(indexName, "id").get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex(indexName).setSource("auto", true).get(); + } + + public class AsyncIndexingService { + private String indexName; + private AtomicLong indexedDocs = new AtomicLong(0); + private AtomicBoolean finished = new AtomicBoolean(); + private Thread indexingThread; + + AsyncIndexingService(String indexName) { + this.indexName = indexName; + } + + public void startIndexing() { + indexingThread = getIndexingThread(); + indexingThread.start(); + } + + public void stopIndexing() throws InterruptedException { + finished.set(true); + indexingThread.join(); + } + + public long getIndexedDocs() { + return indexedDocs.get(); + } + + private Thread getIndexingThread() { + return new Thread(() -> { + while (finished.get() == false) { + indexSingleDoc(indexName); + long currentDocCount = indexedDocs.incrementAndGet(); + logger.info("Completed ingestion of {} docs", currentDocCount); + + } + }); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java new file mode 100644 index 0000000000000..34b60d5f3e9b3 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java @@ -0,0 +1,530 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexService; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.seqno.RetentionLease; +import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.indices.IndexingMemoryController; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteDualReplicationIT extends MigrationBaseTestCase { + private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; + private final String REMOTE_PRI_DOCREP_REMOTE_REP = "remote-primary-docrep-remote-replica"; + private final String FAILOVER_REMOTE_TO_DOCREP = "failover-remote-to-docrep"; + + @Override + protected Collection> nodePlugins() { + /* Adding the following mock plugins: + - InternalSettingsPlugin : To override default intervals of retention lease and global ckp sync + - MockFsRepositoryPlugin and MockTransportService.TestPlugin: To ensure remote interactions are not no-op and retention leases are properly propagated + */ + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(InternalSettingsPlugin.class, MockFsRepositoryPlugin.class, MockTransportService.TestPlugin.class) + ).collect(Collectors.toList()); + } + + /* + Scenario: + - Starts 2 docrep backed node + - Creates index with 1 replica + - Index some docs + - Start 1 remote backed node + - Move primary copy from docrep to remote through _cluster/reroute + - Index some more docs + - Assert primary-replica consistency + */ + public void testRemotePrimaryDocRepReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 2 docrep data nodes"); + internalCluster().startDataOnlyNodes(2); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 1 replica"); + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + createIndex(REMOTE_PRI_DOCREP_REP, oneReplica); + ensureGreen(REMOTE_PRI_DOCREP_REP); + + int initialBatch = randomIntBetween(1, 1000); + logger.info("---> Indexing {} docs", initialBatch); + indexBulk(REMOTE_PRI_DOCREP_REP, initialBatch); + + 
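+        // Switch the cluster into mixed remote store compatibility mode with the
+        // migration direction set to remote_store (see initDocRepToRemoteMigration
+        // in MigrationBaseTestCase) so that remote backed nodes can join this
+        // docrep cluster.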
initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + String primaryShardHostingNode = primaryNodeName(REMOTE_PRI_DOCREP_REP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(REMOTE_PRI_DOCREP_REP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REP); + ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().get().getState(); + String primaryShardHostingNodeId = clusterState.getRoutingTable() + .index(REMOTE_PRI_DOCREP_REP) + .shard(0) + .primaryShard() + .currentNodeId(); + assertTrue(clusterState.getNodes().get(primaryShardHostingNodeId).isRemoteStoreNode()); + + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing another {} docs", secondBatch); + indexBulk(REMOTE_PRI_DOCREP_REP, secondBatch); + // Defensive check to ensure that doc count in replica shard catches up to the primary copy + refreshAndWaitForReplication(REMOTE_PRI_DOCREP_REP); + assertReplicaAndPrimaryConsistency(REMOTE_PRI_DOCREP_REP, initialBatch, secondBatch); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Index some docs + - Move primary copy from docrep to remote through _cluster/reroute + - Starts another remote backed data node + - Expands index to 2 replicas. 
One replica copy lies in remote backed node and other in docrep backed node + - Index some more docs + - Assert primary-replica consistency + */ + public void testRemotePrimaryDocRepAndRemoteReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data nodes"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + Settings zeroReplicas = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + createIndex(REMOTE_PRI_DOCREP_REMOTE_REP, zeroReplicas); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + int firstBatch = randomIntBetween(1, 100); + logger.info("---> Indexing {} docs", firstBatch); + indexBulk(REMOTE_PRI_DOCREP_REMOTE_REP, firstBatch); + + String primaryShardHostingNode = primaryNodeName(REMOTE_PRI_DOCREP_REMOTE_REP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(REMOTE_PRI_DOCREP_REMOTE_REP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().get().getState(); + String primaryShardHostingNodeId = clusterState.getRoutingTable() + .index(REMOTE_PRI_DOCREP_REMOTE_REP) + .shard(0) + .primaryShard() + .currentNodeId(); + assertTrue(clusterState.getNodes().get(primaryShardHostingNodeId).isRemoteStoreNode()); + + logger.info("---> Starting another remote enabled node"); + internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + logger.info("---> Expanding index to 2 replica copies"); + Settings twoReplicas = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build(); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(REMOTE_PRI_DOCREP_REMOTE_REP) + .setSettings(twoReplicas) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing another {} docs", secondBatch); + indexBulk(REMOTE_PRI_DOCREP_REMOTE_REP, secondBatch); + // Defensive check to ensure that doc count in replica shard catches up to the primary copy + refreshAndWaitForReplication(REMOTE_PRI_DOCREP_REMOTE_REP); + assertReplicaAndPrimaryConsistency(REMOTE_PRI_DOCREP_REMOTE_REP, firstBatch, secondBatch); + } + + /* + Checks if retention leases are published on primary shard and it's docrep copies, but not on remote copies + */ + public void testRetentionLeasePresentOnDocrepReplicaButNotRemote() throws Exception { + /* Reducing indices.memory.shard_inactive_time 
to force a flush and trigger translog sync, + instead of relying on Global CKP Sync action which doesn't run on remote enabled copies + + Under steady state, RetentionLeases would be on (GlobalCkp + 1) on a + docrep enabled shard copy and (GlobalCkp) for a remote enabled shard copy. + This is because we block translog sync on remote enabled shard copies during the GlobalCkpSync background task. + + RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush request issued by IndexingMemoryController + when the shard becomes inactive after SHARD_INACTIVE_TIME_SETTING interval. + + Flush triggers a force sync of translog which bumps the RetentionLease sequence number along with it + */ + extraSettings = Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), "3s").build(); + testRemotePrimaryDocRepAndRemoteReplica(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertBusy(() -> { + for (ShardStats shardStats : internalCluster().client() + .admin() + .indices() + .prepareStats(REMOTE_PRI_DOCREP_REMOTE_REP) + .get() + .getShards()) { + ShardRouting shardRouting = shardStats.getShardRouting(); + DiscoveryNode discoveryNode = nodes.get(shardRouting.currentNodeId()); + RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases(); + if (shardRouting.primary()) { + // Primary copy should be on remote node and should have retention leases + assertTrue(discoveryNode.isRemoteStoreNode()); + assertCheckpointsConsistency(shardStats); + assertRetentionLeaseConsistency(shardStats, retentionLeases); + } else { + // Checkpoints and Retention Leases are not synced to remote replicas + if (discoveryNode.isRemoteStoreNode()) { + assertTrue(shardStats.getRetentionLeaseStats().retentionLeases().leases().isEmpty()); + } else { + // Replica copy on docrep node should have retention leases + assertCheckpointsConsistency(shardStats); + assertRetentionLeaseConsistency(shardStats, retentionLeases); + } + } + } + }); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Move primary copy from docrep to remote through _cluster/reroute + - Expands index to 1 replica + - Stops remote enabled node + - Ensure doc count is same after failover + - Index some more docs to ensure working of failed-over primary + */ + public void testFailoverRemotePrimaryToDocrepReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data nodes"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + Settings excludeRemoteNode = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(FAILOVER_REMOTE_TO_DOCREP, excludeRemoteNode); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + initDocRepToRemoteMigration(); + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + logger.info("---> Starting doc 
ingestion in parallel thread"); + AsyncIndexingService asyncIndexingService = new AsyncIndexingService(FAILOVER_REMOTE_TO_DOCREP); + asyncIndexingService.startIndexing(); + + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_DOCREP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + logger.info("---> Expanding index to 1 replica copy"); + Settings oneReplica = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(FAILOVER_REMOTE_TO_DOCREP) + .setSettings(oneReplica) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Stopping indexing thread"); + asyncIndexingService.stopIndexing(); + + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_DOCREP); + Map<ShardRouting, ShardStats> shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_DOCREP) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long initialPrimaryDocCount = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + initialPrimaryDocCount = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + int firstBatch = (int) asyncIndexingService.getIndexedDocs(); + assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_DOCREP, firstBatch, 0); + + logger.info("---> Stopping remote store enabled node"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName)); + ensureStableCluster(2); + ensureYellow(FAILOVER_REMOTE_TO_DOCREP); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_DOCREP).setDocs(true).get().asMap(); + nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long primaryDocCountAfterFailover = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertFalse(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + primaryDocCountAfterFailover = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + assertEquals(initialPrimaryDocCount, primaryDocCountAfterFailover); + + logger.info("---> Indexing some more docs to ensure that the failed-over primary is ingesting new docs"); + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing {} more docs", secondBatch); + indexBulk(FAILOVER_REMOTE_TO_DOCREP, secondBatch); + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_DOCREP); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_DOCREP).setDocs(true).get().asMap(); + assertEquals(1, shardStatsMap.size()); + shardStatsMap.forEach( + (shardRouting, shardStats) -> { assertEquals(firstBatch + secondBatch, shardStats.getStats().getDocs().getCount()); } + ); + }
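The primary doc-count lookup above is repeated before and after failover; a hedged sketch of how it could be factored into a helper, assuming the Map<ShardRouting, ShardStats> shape that IndicesStatsResponse#asMap() returns (the helper name is illustrative, not part of this change):

    private static long primaryDocCount(Map<ShardRouting, ShardStats> shardStatsMap) {
        // Sum doc counts over primary copies only; a single-shard index yields one entry.
        return shardStatsMap.entrySet()
            .stream()
            .filter(entry -> entry.getKey().primary())
            .mapToLong(entry -> entry.getValue().getStats().getDocs().getCount())
            .sum();
    }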
+ + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replicas + - Starts 1 remote backed data node + - Moves the primary copy from docrep to remote through _cluster/reroute + - Expands the index to 1 replica + - Stops the remote enabled node + - Ensures the doc count is the same after failover + - Indexes some more docs to verify that the failed-over primary accepts writes + - Starts another remote node + - Moves the primary copy from docrep to remote through _cluster/reroute + - Ensures that the remote store is seeded on the new remote node by asserting that remote uploads from that node are > 0 + */ + public void testFailoverRemotePrimaryToDocrepReplicaReseedToRemotePrimary() throws Exception { + testFailoverRemotePrimaryToDocrepReplica(); + + logger.info("---> Removing replica copy"); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings(FAILOVER_REMOTE_TO_DOCREP) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + logger.info("---> Starting a new remote enabled node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + 2, + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size() + ); + + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_DOCREP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + Map<ShardRouting, ShardStats> shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_DOCREP) + .get() + .asMap(); + DiscoveryNodes discoveryNodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertEquals(1, shardStatsMap.size()); + shardStatsMap.forEach((shardRouting, shardStats) -> { + if (discoveryNodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()) { + RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getTotalUploadTime() > 0); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + } + }); + } + + private void assertReplicaAndPrimaryConsistency(String indexName, int firstBatch, int secondBatch) throws Exception { + assertBusy(() -> { + Map<ShardRouting, ShardStats> shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(indexName) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + CommonStats shardStats = shardStatsMap.get(shardRouting).getStats(); + if (shardRouting.primary()) { + assertEquals(firstBatch + secondBatch, shardStats.getDocs().getCount()); + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + RemoteSegmentStats remoteSegmentStats = shardStats.getSegments().getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + assertTrue(remoteSegmentStats.getTotalUploadTime() > 0); + } else { + boolean remoteNode = nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode(); + assertEquals( + "Mismatched doc count. Is this shard copy on a remote node? 
" + remoteNode, + firstBatch + secondBatch, + shardStats.getDocs().getCount() + ); + RemoteSegmentStats remoteSegmentStats = shardStats.getSegments().getRemoteSegmentStats(); + if (remoteNode) { + assertTrue(remoteSegmentStats.getDownloadBytesStarted() > 0); + assertTrue(remoteSegmentStats.getTotalDownloadTime() > 0); + } else { + assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(0, remoteSegmentStats.getTotalUploadTime()); + } + } + } + }); + } + + /** + * For a docrep enabled shard copy or a primary shard copy, + * asserts that the stored Retention Leases equals to 1 + maxSeqNo ingested on the node + * + * @param shardStats ShardStats object from NodesStats API + * @param retentionLeases RetentionLeases from NodesStats API + */ + private static void assertRetentionLeaseConsistency(ShardStats shardStats, RetentionLeases retentionLeases) { + long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + for (RetentionLease rl : retentionLeases.leases()) { + assertEquals(maxSeqNo + 1, rl.retainingSequenceNumber()); + } + } + + /** + * For a docrep enabled shard copy or a primary shard copy, + * asserts that local and global checkpoints are up-to-date with maxSeqNo of doc operations + * + * @param shardStats ShardStats object from NodesStats API + */ + private static void assertCheckpointsConsistency(ShardStats shardStats) { + long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + long localCkp = shardStats.getSeqNoStats().getLocalCheckpoint(); + long globalCkp = shardStats.getSeqNoStats().getGlobalCheckpoint(); + + assertEquals(maxSeqNo, localCkp); + assertEquals(maxSeqNo, globalCkp); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java new file mode 100644 index 0000000000000..b1c429a45a1a1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java new file mode 100644 index 0000000000000..b1c429a45a1a1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.hamcrest.OpenSearchAssertions; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Arrays.asList; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemotePrimaryRelocationIT extends MigrationBaseTestCase { + protected int maximumNumberOfShards() { + return 1; + } + + // ToDo : Fix me when we support migration of replicas + protected int maximumNumberOfReplicas() { + return 0; + } + + protected Collection<Class<? extends Plugin>> nodePlugins() { + return asList(MockTransportService.TestPlugin.class); + } + + public void testMixedModeRelocation() throws Exception { + String docRepNode = internalCluster().startNode(); + Client client = internalCluster().client(docRepNode); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // create an index with 1 shard and 0 replicas + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); + ensureGreen("test"); + + AtomicInteger numAutoGenDocs = new AtomicInteger(); + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = getIndexingThread(finished, numAutoGenDocs); + + refresh("test"); + + // add remote node in mixed mode cluster + addRemote = true; + String remoteNode = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + String remoteNode2 = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + // assert repo gets registered + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); + assertEquals(1, getRepositoriesResponse.repositories().size()); + + // Index some more docs + int currentDoc = numAutoGenDocs.get(); + int finalCurrentDoc1 = currentDoc; + waitUntil(() -> numAutoGenDocs.get() > finalCurrentDoc1 + 5); + +
logger.info("--> relocating from {} to {} ", docRepNode, remoteNode); + client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + + assertEquals(0, clusterHealthResponse.getRelocatingShards()); + assertEquals(remoteNode, primaryNodeName("test")); + logger.info("--> relocation from docrep to remote complete"); + + // Index some more docs + currentDoc = numAutoGenDocs.get(); + int finalCurrentDoc = currentDoc; + waitUntil(() -> numAutoGenDocs.get() > finalCurrentDoc + 5); + + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand("test", 0, remoteNode, remoteNode2)) + .execute() + .actionGet(); + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + + assertEquals(0, clusterHealthResponse.getRelocatingShards()); + assertEquals(remoteNode2, primaryNodeName("test")); + + logger.info("--> relocation from remote to remote complete"); + + finished.set(true); + indexingThread.join(); + refresh("test"); + OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); + OpenSearchAssertions.assertHitCount( + client().prepareSearch("test") + .setTrackTotalHits(true)// extra paranoia ;) + .setQuery(QueryBuilders.termQuery("auto", true)) + .get(), + numAutoGenDocs.get() + ); + + } + + public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { + String docRepNode = internalCluster().startNode(); + Client client = internalCluster().client(docRepNode); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // create shard with 0 replica and 1 shard + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); + ensureGreen("test"); + + AtomicInteger numAutoGenDocs = new AtomicInteger(); + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = getIndexingThread(finished, numAutoGenDocs); + + refresh("test"); + + // add remote node in mixed mode cluster + addRemote = true; + String remoteNode = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + // assert repo gets registered + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); + assertEquals(1, getRepositoriesResponse.repositories().size()); + + setFailRate(REPOSITORY_NAME, 100); + + logger.info("--> relocating from {} to {} ", docRepNode, remoteNode); + client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(5)) + 
.setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + + assertEquals(1, clusterHealthResponse.getRelocatingShards()); + setFailRate(REPOSITORY_NAME, 0); + Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000)); + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(45)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + assertEquals(0, clusterHealthResponse.getRelocatingShards()); + logger.info("--> docrep to remote relocation complete"); + finished.set(true); + indexingThread.join(); + refresh("test"); + OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); + OpenSearchAssertions.assertHitCount( + client().prepareSearch("test") + .setTrackTotalHits(true)// extra paranoia ;) + .setQuery(QueryBuilders.termQuery("auto", true)) + .get(), + numAutoGenDocs.get() + ); + } + + private static Thread getIndexingThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) { + Thread indexingThread = new Thread(() -> { + while (finished.get() == false && numAutoGenDocs.get() < 10_000) { + IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex("test").setSource("auto", true).get(); + numAutoGenDocs.incrementAndGet(); + } + }); + indexingThread.start(); + return indexingThread; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index 869032a84c2c2..67316ed0e6e6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -44,7 +44,6 @@ public Settings indexSettings() { .build(); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9191") public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().startClusterManagerOnlyNode(); super.testPrimaryRelocationWhileIndexing(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 21ce4be9981fb..181f242aecd09 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -18,6 +18,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; import org.opensearch.client.Requests; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; @@ -26,6 +27,8 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import 
org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; @@ -43,6 +46,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; @@ -50,7 +54,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -257,6 +261,84 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); } + /** + * In this test, we validate presence of remote_store custom data in index metadata for standard index creation and + * on snapshot restore. + */ + public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndexName1version1 = indexName1 + "-restored-1"; + String restoredIndexName1version2 = indexName1 + "-restored-2"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + indexDocuments(client, indexName1, randomIntBetween(5, 10)); + ensureGreen(indexName1); + validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertTrue(snapshotInfo.successfulShards() > 0); + assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); + + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1version1) + .get(); + assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); + ensureGreen(restoredIndexName1version1); + validatePathType(restoredIndexName1version1, PathType.FIXED, PathHashAlgorithm.FNV_1A); + + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) + .get(); + + restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + 
.setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1version2) + .get(); + assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); + ensureGreen(restoredIndexName1version2); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + + // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. + indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings); + ensureGreen(indexName2); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + + // Validating that custom data has not changed for indexes which were created before the cluster setting got updated + validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); + } + + private void validatePathType(String index, PathType pathType, PathHashAlgorithm pathHashAlgorithm) { + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + // Validate that the remote_store custom data is present in index metadata for the created index. + Map<String, String> remoteCustomData = state.metadata().index(index).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + assertNotNull(remoteCustomData); + assertEquals(pathType.name(), remoteCustomData.get(PathType.NAME)); + assertEquals(pathHashAlgorithm.name(), remoteCustomData.get(PathHashAlgorithm.NAME)); + } + public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 3899c8a80f442..ba90cbe96e157 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -28,6 +28,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; @@ -56,8 +57,11 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -187,7 +191,122 @@ protected BulkResponse indexBulk(String indexName, int numDocs) { return client().bulk(bulkRequest).actionGet(); } - private Settings defaultIndexSettings() { + public static Settings 
remoteStoreClusterSettings(String name, Path path) { + return remoteStoreClusterSettings(name, path, name, path); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put( + buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + segmentRepoType, + translogRepoName, + translogRepoPath, + translogRepoType, + false + ) + ); + return settingsBuilder.build(); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); + return settingsBuilder.build(); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath, + boolean withRateLimiterAttributes + ) { + return buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + ReloadableFsRepository.TYPE, + translogRepoName, + translogRepoPath, + ReloadableFsRepository.TYPE, + withRateLimiterAttributes + ); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType, + boolean withRateLimiterAttributes + ) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + + Settings.Builder settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, segmentRepoType) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, translogRepoType) + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(stateRepoTypeAttributeKey, segmentRepoType) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + + if (withRateLimiterAttributes) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); + } + + return settings.build(); + } + + Settings defaultIndexSettings() { return Settings.builder() .put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index e1997fea3433a..46e5b7aa28318 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -31,6 +31,7 @@ import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; @@ -56,7 +57,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -189,7 +190,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles(); + int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); // Delete is async. 
assertBusy(() -> { int actualFileCount = getFileCount(indexPath); @@ -224,7 +225,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -243,7 +244,7 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -469,7 +470,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); - assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()); } private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 6059abce53c8b..4a8b00ea45738 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -38,6 +38,8 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -56,6 +58,8 @@ import java.util.List; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.global; +import static org.opensearch.search.aggregations.AggregationBuilders.stats; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -164,4 +168,23 @@ private void runLargeStringAggregationTest(AggregationBuilder aggregation) { } assertTrue("Exception should have been thrown", exceptionThrown); } + + public void testAggsOnEmptyShards() { + // Create index with 5 shards but only 1 doc + assertAcked( + prepareCreate( + "idx", + 
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 5).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("score", "type=integer") + ); + client().prepareIndex("idx").setId("1").setSource("score", "5").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // Validate global agg does not throw an exception + assertSearchResponse( + client().prepareSearch("idx").addAggregation(global("global").subAggregation(stats("value_stats").field("score"))).get() + ); + + // Validate non-global agg does not throw an exception + assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java index 5a38ba670f1dc..a743f22a2ff77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java @@ -26,6 +26,7 @@ import java.util.Collection; import java.util.List; +import static org.opensearch.indices.IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -50,23 +51,25 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked( prepareCreate( "idx", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), false) ).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer") ); waitForRelocation(ClusterHealthStatus.GREEN); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get(); - refresh("idx"); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10"), + 
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100") + ); waitForRelocation(ClusterHealthStatus.GREEN); refresh(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java index a6554271a0bc5..2efec6a63e6c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java @@ -30,6 +30,11 @@ */ public class SimpleNestedExplainIT extends OpenSearchIntegTestCase { + @Override + protected int numberOfShards() { + return 1; + } + /* * Tests the explain output for multiple docs. Concurrent search with multiple slices is tested * here as call to indexRandomForMultipleSlices is made and compared with explain output for @@ -70,7 +75,23 @@ public void testExplainMultipleDocs() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - indexRandomForMultipleSlices("test"); + client().prepareIndex("test") + .setId("2") + .setSource( + jsonBuilder().startObject() + .field("field1", "value2") + .startArray("nested1") + .startObject() + .field("n_field1", "n_value2") + .endObject() + .startObject() + .field("n_field1", "n_value2") + .endObject() + .endArray() + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); // Turn off the concurrent search setting to test search with non-concurrent search client().admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 31678d3f018a1..cae543506f919 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -112,7 +112,10 @@ public static Collection<Object[]> parameters() { @BeforeClass public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); + // The lower bound can't be small (such as 60): simpleQueryStringQuery("foo Bar 19 127.0.0.1") in testDocWithAllTypes + // creates many BooleanClause instances and would trip a too_many_nested_clauses exception. + // So we set a higher lower bound (such as 80) to avoid failures. 
+ CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(80, 100); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 7117818451e14..e76587653e99a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -39,6 +39,7 @@ import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -151,6 +152,62 @@ public void testParallelRestoreOperations() { assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } + /** + * In this test, we test that an index created does not have any remote_store custom data in index metadata at the + * time of index creation and after snapshot restore. + */ + public void testNoRemoteStoreCustomDataOnIndexCreationAndRestore() { + String indexName1 = "testindex1"; + String repoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath = randomRepoPath().toAbsolutePath(); + logger.info("Path [{}]", absolutePath); + String restoredIndexName1 = indexName1 + "-restored"; + String expectedValue = "expected"; + + Client client = client(); + // Write a document + String docId = Integer.toString(randomInt()); + index(indexName1, "_doc", docId, "value", expectedValue); + + createRepository(repoName, "fs", absolutePath); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // Validate that the remote_store custom data is not present in index metadata for the created index. + assertNull(state.metadata().index(indexName1).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(repoName, snapshotName1) + .setWaitForCompletion(false) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + assertThat(restoreSnapshotResponse1.status(), equalTo(RestStatus.ACCEPTED)); + ensureGreen(restoredIndexName1); + assertThat(client.prepareGet(restoredIndexName1, docId).get().isExists(), equalTo(true)); + + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // Validate that the remote_store custom data is not present in index metadata for the restored index. 
+ assertNull(state.metadata().index(restoredIndexName1).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + } + public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index c649c4ab13e7e..b019bb57743c9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -303,7 +303,7 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio // Verify index setting isSegRepEnabled. Index index = resolveIndex(RESTORED_INDEX_NAME); IndicesService indicesService = internalCluster().getInstance(IndicesService.class); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); + assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabledOrRemoteNode(), false); } /** diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index b19bf9590f43b..5e2b62614fc47 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -281,6 +281,8 @@ import org.opensearch.action.support.AutoCreateIndex; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.action.support.TransportAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.TransportGetTermVersionAction; import org.opensearch.action.termvectors.MultiTermVectorsAction; import org.opensearch.action.termvectors.TermVectorsAction; import org.opensearch.action.termvectors.TransportMultiTermVectorsAction; @@ -292,6 +294,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.NamedRegistry; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.AbstractModule; import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.multibindings.MapBinder; @@ -614,6 +617,7 @@ public void reg actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); + actions.register(GetTermVersionAction.INSTANCE, TransportGetTermVersionAction.class); actions.register(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); actions.register(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); actions.register(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); @@ -1049,8 +1053,9 @@ public RestController getRestController() { *

    * This class is modeled after {@link NamedRegistry} but provides both register and unregister capabilities. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.7.0") public static class DynamicActionRegistry { // This is the unmodifiable actions map created during node bootstrap, which // will continue to link ActionType and TransportAction pairs from core and plugins diff --git a/server/src/main/java/org/opensearch/action/ActionRequest.java b/server/src/main/java/org/opensearch/action/ActionRequest.java index 5313a05ad6fae..7ab87065bef7e 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequest.java +++ b/server/src/main/java/org/opensearch/action/ActionRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.transport.TransportRequest; @@ -43,6 +44,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ActionRequest extends TransportRequest { public ActionRequest() { diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java index dae931bdd1891..559dad73536e1 100644 --- a/server/src/main/java/org/opensearch/action/ActionType.java +++ b/server/src/main/java/org/opensearch/action/ActionType.java @@ -32,6 +32,7 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -43,6 +44,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ActionType<Response extends ActionResponse> { private final String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 4aaa7f1950823..cae465a90446e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -125,9 +125,12 @@ protected void clusterManagerOperation( ? clusterState -> true : clusterState -> clusterState.metadata().version() >= request.waitForMetadataVersion(); + // The action is executed on the local node if either the request is local-only or the local node has the same cluster state as + // the cluster-manager final Predicate<ClusterState> acceptableClusterStateOrNotMasterPredicate = request.local() - ? acceptableClusterStatePredicate - : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false); + || !state.nodes().isLocalNodeElectedClusterManager() + ? 
acceptableClusterStatePredicate + : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false); if (acceptableClusterStatePredicate.test(state)) { ActionListener.completeWith(listener, () -> buildResponse(request, state)); @@ -231,4 +234,8 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi return new ClusterStateResponse(currentState.getClusterName(), builder.build(), false); } + @Override + protected boolean localExecuteSupportedByAction() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java index 1b912518d7e04..fc97d67c6c3af 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java @@ -148,7 +148,7 @@ protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplication IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); ShardId shardId = shardRouting.shardId(); - if (indexShard.indexSettings().isSegRepEnabled() == false) { + if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() == false) { return null; } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index a7a13afd2597c..fdba8a42c0170 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -442,7 +442,7 @@ protected long primaryOperationSize(BulkShardRequest request) { @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.PRIMARY_TERM_VALIDATION; } return super.getReplicationMode(indexShard); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index f738c182c06da..3b8a6937815aa 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; @@ -712,7 +713,13 @@ public final String buildDescription() { sb.append("scroll[").append(scroll.keepAlive()).append("], "); } if (source != null) { - sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + sb.append("source["); + try { + sb.append(source.toString(FORMAT_PARAMS)); + } catch (final OpenSearchException ex) { + sb.append(""); + } + sb.append("]"); } else { sb.append("source[]"); } diff --git a/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java b/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java index 786d8cfb6fa1d..a5054b966b2f9 100644 --- a/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java +++ 
b/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; @@ -65,7 +66,7 @@ protected HandledTransportAction( Writeable.Reader<Request> requestReader, String executor ) { - this(actionName, true, transportService, actionFilters, requestReader, executor); + this(actionName, true, null, transportService, actionFilters, requestReader, executor); } protected HandledTransportAction( @@ -75,19 +76,49 @@ protected HandledTransportAction( ActionFilters actionFilters, Writeable.Reader<Request> requestReader ) { - this(actionName, canTripCircuitBreaker, transportService, actionFilters, requestReader, ThreadPool.Names.SAME); + this(actionName, canTripCircuitBreaker, null, transportService, actionFilters, requestReader, ThreadPool.Names.SAME); } protected HandledTransportAction( String actionName, boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, + TransportService transportService, + ActionFilters actionFilters, + Writeable.Reader<Request> requestReader + ) { + this( + actionName, + canTripCircuitBreaker, + admissionControlActionType, + transportService, + actionFilters, + requestReader, + ThreadPool.Names.SAME + ); + } + + protected HandledTransportAction( + String actionName, + boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, TransportService transportService, ActionFilters actionFilters, Writeable.Reader<Request> requestReader, String executor ) { super(actionName, actionFilters, transportService.getTaskManager()); - transportService.registerRequestHandler(actionName, executor, false, canTripCircuitBreaker, requestReader, new TransportHandler()); + + transportService.registerRequestHandler( + actionName, + executor, + false, + canTripCircuitBreaker, + admissionControlActionType, + requestReader, + new TransportHandler() + ); + } /** diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index 72aae210d61ae..f71347f6f1d07 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.concurrent.ThreadContext; @@ -52,8 +53,9 @@ /** * Base class for a transport action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> { public final String actionName;
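The overloads above let a concrete handler opt into admission control at registration time. A hedged sketch of a subclass using the new constructor; the action name, the request/response types and the chosen AdmissionControlActionType value are illustrative assumptions, not part of this change:

    public class TransportExampleAction extends HandledTransportAction<ExampleRequest, ExampleResponse> {

        @Inject
        public TransportExampleAction(TransportService transportService, ActionFilters actionFilters) {
            // Registers the transport handler with admission control of the given type,
            // so requests for this action can be throttled and accounted accordingly.
            super(
                "indices:data/read/example",
                true,                              // canTripCircuitBreaker
                AdmissionControlActionType.SEARCH, // hypothetical choice for a read action
                transportService,
                actionFilters,
                ExampleRequest::new
            );
        }

        @Override
        protected void doExecute(Task task, ExampleRequest request, ActionListener<ExampleResponse> listener) {
            listener.onResponse(new ExampleResponse());
        }
    }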
b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * A base request for cluster-manager based operations. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ClusterManagerNodeRequest> extends ActionRequest { public static final TimeValue DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 536ddcdd402e2..080b0d607e991 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -41,6 +41,9 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.RetryableAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionRequest; +import org.opensearch.action.support.clustermanager.term.GetTermVersionResponse; import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; @@ -61,16 +64,22 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Predicate; +import static org.opensearch.Version.V_2_13_0; + /** * A base class for operations that need to be performed on the cluster-manager node. 
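The hunks below add a local-execution fast path for cluster-manager read actions: before forwarding, the node asks the elected cluster-manager for its cluster-state (term, version) via the new GetTermVersionAction and executes locally when the local copy matches. A minimal self-contained model of that decision follows (a sketch, not code from the patch; all names here are illustrative):

import java.util.Objects;

final class LocalExecuteModel {
    record TermVersion(long term, long version) {}

    // Mirrors the routing added below: opt-in actions run locally when the local
    // ClusterState matches the cluster-manager's reported (term, version).
    static String route(boolean actionSupportsLocalExecute, TermVersion local, TermVersion manager) {
        if (actionSupportsLocalExecute && Objects.equals(local, manager)) {
            return "execute-locally"; // in-sync: skip the extra network hop
        }
        return "forward-to-cluster-manager"; // stale local state or opted-out action
    }

    public static void main(String[] args) {
        TermVersion manager = new TermVersion(5, 42);
        System.out.println(route(true, new TermVersion(5, 42), manager)); // execute-locally
        System.out.println(route(true, new TermVersion(5, 41), manager)); // forward-to-cluster-manager
    }
}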
* @@ -97,7 +106,7 @@ protected TransportClusterManagerNodeAction( Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver ) { - this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + this(actionName, true, null, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } protected TransportClusterManagerNodeAction( @@ -110,7 +119,31 @@ protected TransportClusterManagerNodeAction( Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver ) { - super(actionName, canTripCircuitBreaker, transportService, actionFilters, request); + this( + actionName, + canTripCircuitBreaker, + null, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + + protected TransportClusterManagerNodeAction( + String actionName, + boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super(actionName, canTripCircuitBreaker, admissionControlActionType, transportService, actionFilters, request); this.transportService = transportService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -252,23 +285,13 @@ protected void doStart(ClusterState clusterState) { }); } } else { - ActionListener delegate = ActionListener.delegateResponse(listener, (delegatedListener, t) -> { - if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { - logger.debug( - () -> new ParameterizedMessage( - "master could not publish cluster state or " - + "stepped down before publishing action [{}], scheduling a retry", - actionName - ), - t - ); - retryOnMasterChange(clusterState, t); - } else { - delegatedListener.onFailure(t); - } - }); threadPool.executor(executor) - .execute(ActionRunnable.wrap(delegate, l -> clusterManagerOperation(task, request, clusterState, l))); + .execute( + ActionRunnable.wrap( + getDelegateForLocalExecute(clusterState), + l -> clusterManagerOperation(task, request, clusterState, l) + ) + ); } } else { if (nodes.getClusterManagerNode() == null) { @@ -276,32 +299,15 @@ protected void doStart(ClusterState clusterState) { retryOnMasterChange(clusterState, null); } else { DiscoveryNode clusterManagerNode = nodes.getClusterManagerNode(); - final String actionName = getClusterManagerActionName(clusterManagerNode); - transportService.sendRequest( - clusterManagerNode, - actionName, - request, - new ActionListenerResponseHandler(listener, TransportClusterManagerNodeAction.this::read) { - @Override - public void handleException(final TransportException exp) { - Throwable cause = exp.unwrapCause(); - if (cause instanceof ConnectTransportException - || (exp instanceof RemoteTransportException && cause instanceof NodeClosedException)) { - // we want to retry here a bit to see if a new cluster-manager is elected - logger.debug( - "connection exception while trying to forward request with action name [{}] to " - + "master node [{}], scheduling a retry. 
Error: [{}]", - actionName, - nodes.getClusterManagerNode(), - exp.getDetailedMessage() - ); - retryOnMasterChange(clusterState, cause); - } else { - listener.onFailure(exp); - } - } - } - ); + if (clusterManagerNode.getVersion().onOrAfter(V_2_13_0) && localExecuteSupportedByAction()) { + BiConsumer executeOnLocalOrClusterManager = clusterStateLatestChecker( + this::executeOnLocalNode, + this::executeOnClusterManager + ); + executeOnLocalOrClusterManager.accept(clusterManagerNode, clusterState); + } else { + executeOnClusterManager(clusterManagerNode, clusterState); + } } } } catch (Exception e) { @@ -351,6 +357,114 @@ public void onTimeout(TimeValue timeout) { } }, statePredicate); } + + private ActionListener getDelegateForLocalExecute(ClusterState clusterState) { + return ActionListener.delegateResponse(listener, (delegatedListener, t) -> { + if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { + logger.debug( + () -> new ParameterizedMessage( + "cluster-manager could not publish cluster state or " + + "stepped down before publishing action [{}], scheduling a retry", + actionName + ), + t + ); + + retryOnMasterChange(clusterState, t); + } else { + delegatedListener.onFailure(t); + } + }); + } + + protected BiConsumer clusterStateLatestChecker( + Consumer onLatestLocalState, + BiConsumer onStaleLocalState + ) { + return (clusterManagerNode, clusterState) -> { + transportService.sendRequest( + clusterManagerNode, + GetTermVersionAction.NAME, + new GetTermVersionRequest(), + new TransportResponseHandler() { + @Override + public void handleResponse(GetTermVersionResponse response) { + boolean isLatestClusterStatePresentOnLocalNode = response.matches(clusterState); + logger.trace( + "Received GetTermVersionResponse response : ClusterStateTermVersion {}, latest-on-local {}", + response.getClusterStateTermVersion(), + isLatestClusterStatePresentOnLocalNode + ); + if (isLatestClusterStatePresentOnLocalNode) { + onLatestLocalState.accept(clusterState); + } else { + onStaleLocalState.accept(clusterManagerNode, clusterState); + } + } + + @Override + public void handleException(TransportException exp) { + handleTransportException(clusterManagerNode, clusterState, exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetTermVersionResponse read(StreamInput in) throws IOException { + return new GetTermVersionResponse(in); + } + + } + ); + }; + } + + private void executeOnLocalNode(ClusterState localClusterState) { + Runnable runTask = ActionRunnable.wrap( + getDelegateForLocalExecute(localClusterState), + l -> clusterManagerOperation(task, request, localClusterState, l) + ); + threadPool.executor(executor).execute(runTask); + } + + private void executeOnClusterManager(DiscoveryNode clusterManagerNode, ClusterState clusterState) { + final String actionName = getClusterManagerActionName(clusterManagerNode); + + transportService.sendRequest( + clusterManagerNode, + actionName, + request, + new ActionListenerResponseHandler(listener, TransportClusterManagerNodeAction.this::read) { + @Override + public void handleException(final TransportException exp) { + handleTransportException(clusterManagerNode, clusterState, exp); + } + } + ); + } + + private void handleTransportException(DiscoveryNode clusterManagerNode, ClusterState clusterState, final TransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException + || (exp instanceof RemoteTransportException && 
cause instanceof NodeClosedException) { + // we want to retry here a bit to see if a new cluster-manager is elected + + logger.debug( + "connection exception while trying to forward request with action name [{}] to " + + "master node [{}], scheduling a retry. Error: [{}]", + actionName, + clusterManagerNode, + exp.getDetailedMessage() + ); + + retryOnMasterChange(clusterState, cause); + } else { + listener.onFailure(exp); + } + } } /** @@ -372,4 +486,14 @@ protected String getMasterActionName(DiscoveryNode node) { return getClusterManagerActionName(node); } + /** + * Override to return true if the transport action can be executed locally and need NOT always be executed on the cluster-manager (read actions). + * The action is executed locally if this method returns true AND + * the ClusterState on the local node is in-sync with the cluster-manager. + * + * @return true if the action can be run locally + */ + protected boolean localExecuteSupportedByAction() { + return false; + } } diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java index d8cd5af992028..d58487a475bcf 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -59,12 +60,46 @@ protected TransportClusterManagerNodeReadAction( Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver ) { - this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + this( + actionName, + true, + AdmissionControlActionType.CLUSTER_ADMIN, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + + protected TransportClusterManagerNodeReadAction( + String actionName, + boolean checkSizeLimit, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + actionName, + checkSizeLimit, + null, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); } protected TransportClusterManagerNodeReadAction( String actionName, boolean checkSizeLimit, + AdmissionControlActionType admissionControlActionType, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, @@ -75,6 +110,7 @@ protected TransportClusterManagerNodeReadAction( super( actionName, checkSizeLimit, + admissionControlActionType, transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionAction.java new file mode 100644 index 0000000000000..2401dddd0cab3 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.clustermanager.term; + +import org.opensearch.action.ActionType; + +/** + * Transport action for fetching cluster term and version + * + * @opensearch.internal + */ +public class GetTermVersionAction extends ActionType { + + public static final GetTermVersionAction INSTANCE = new GetTermVersionAction(); + public static final String NAME = "internal:monitor/term"; + + private GetTermVersionAction() { + super(NAME, GetTermVersionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionRequest.java new file mode 100644 index 0000000000000..507997a1f7e7a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionRequest.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.clustermanager.term; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Request object to get cluster term and version + * + * @opensearch.internal + */ +public class GetTermVersionRequest extends ClusterManagerNodeReadRequest { + + public GetTermVersionRequest() {} + + public GetTermVersionRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionResponse.java new file mode 100644 index 0000000000000..0906abe57d547 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionResponse.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.support.clustermanager.term; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response object of cluster term + * + * @opensearch.internal + */ +public class GetTermVersionResponse extends ActionResponse { + + private final ClusterStateTermVersion clusterStateTermVersion; + + public GetTermVersionResponse(ClusterStateTermVersion clusterStateTermVersion) { + this.clusterStateTermVersion = clusterStateTermVersion; + } + + public GetTermVersionResponse(StreamInput in) throws IOException { + super(in); + this.clusterStateTermVersion = new ClusterStateTermVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterStateTermVersion.writeTo(out); + } + + public ClusterStateTermVersion getClusterStateTermVersion() { + return clusterStateTermVersion; + } + + public boolean matches(ClusterState clusterState) { + return clusterStateTermVersion != null && clusterStateTermVersion.equals(new ClusterStateTermVersion(clusterState)); + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java new file mode 100644 index 0000000000000..4752a99c910e4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.support.clustermanager.term; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for obtaining cluster term and version from cluster-manager + * + * @opensearch.internal + */ +public class TransportGetTermVersionAction extends TransportClusterManagerNodeReadAction { + + private final Logger logger = LogManager.getLogger(getClass()); + + @Inject + public TransportGetTermVersionAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetTermVersionAction.NAME, + false, + transportService, + clusterService, + threadPool, + actionFilters, + GetTermVersionRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetTermVersionResponse read(StreamInput in) throws IOException { + return new GetTermVersionResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(GetTermVersionRequest request, ClusterState state) { + // cluster state term and version needs to be retrieved even on a fully blocked cluster + return null; + } + + @Override + protected void clusterManagerOperation( + GetTermVersionRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + ActionListener.completeWith(listener, () -> buildResponse(request, state)); + } + + private GetTermVersionResponse buildResponse(GetTermVersionRequest request, ClusterState state) { + return new GetTermVersionResponse(new ClusterStateTermVersion(state)); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/package-info.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/package-info.java new file mode 100644 index 0000000000000..229c405df2d7c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Term transport handler. 
*/ +package org.opensearch.action.support.clustermanager.term; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java index 189bc82348a0c..9f5e31a9c6926 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java @@ -9,6 +9,8 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.core.action.ActionListener; @@ -31,14 +33,22 @@ public class ReplicationModeAwareProxy primaryTermValidationProxy; + private final DiscoveryNodes discoveryNodes; + + private final boolean isRemoteEnabled; + public ReplicationModeAwareProxy( ReplicationMode replicationModeOverride, + DiscoveryNodes discoveryNodes, ReplicationOperation.Replicas replicasProxy, - ReplicationOperation.Replicas primaryTermValidationProxy + ReplicationOperation.Replicas primaryTermValidationProxy, + boolean remoteIndexSettingsEnabled ) { super(replicasProxy); this.replicationModeOverride = Objects.requireNonNull(replicationModeOverride); this.primaryTermValidationProxy = Objects.requireNonNull(primaryTermValidationProxy); + this.discoveryNodes = discoveryNodes; + this.isRemoteEnabled = remoteIndexSettingsEnabled; } @Override @@ -60,16 +70,26 @@ protected void performOnReplicaProxy( @Override ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { - // If the current routing is the primary, then it does not need to be replicated if (shardRouting.isSameAllocation(primaryRouting)) { return ReplicationMode.NO_REPLICATION; } - + // Perform full replication during primary relocation if (primaryRouting.relocating() && shardRouting.isSameAllocation(primaryRouting.getTargetRelocatingShard())) { return ReplicationMode.FULL_REPLICATION; } - + /* + Only applicable during remote store migration. + During the migration process, remote based index settings will not be enabled, + thus we will rely on node attributes to figure out the replication mode + */ + if (isRemoteEnabled == false) { + DiscoveryNode targetNode = discoveryNodes.get(shardRouting.currentNodeId()); + if (targetNode != null && targetNode.isRemoteStoreNode() == false) { + // Perform full replication if replica is hosted on a non-remote node. + return ReplicationMode.FULL_REPLICATION; + } + } return replicationModeOverride; } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 95f998e2d89c2..49a96603f6802 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -356,7 +356,7 @@ public void performOn( * @return the overridden replication mode. 
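To summarize the decision order that determineReplicationMode implements above, here is a compact self-contained model (a sketch; the boolean inputs stand in for the routing and node-attribute checks in the real code):

final class ReplicationModeModel {
    enum Mode { NO_REPLICATION, FULL_REPLICATION, OVERRIDE }

    // Same precedence as the proxy above: the primary itself, then a relocation
    // target, then the migration case (remote settings off, replica on a docrep node).
    static Mode decide(boolean isPrimaryAllocation, boolean isRelocationTarget,
                       boolean remoteSettingsEnabled, boolean replicaOnRemoteNode) {
        if (isPrimaryAllocation) return Mode.NO_REPLICATION;
        if (isRelocationTarget) return Mode.FULL_REPLICATION;
        if (!remoteSettingsEnabled && !replicaOnRemoteNode) return Mode.FULL_REPLICATION;
        return Mode.OVERRIDE; // the configured replicationModeOverride
    }
}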
*/ public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.NO_REPLICATION; } return ReplicationMode.FULL_REPLICATION; @@ -642,8 +642,14 @@ public void handleException(TransportException exp) { primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, retryTimeout, - indexShard.isRemoteTranslogEnabled() - ? new ReplicationModeAwareProxy<>(getReplicationMode(indexShard), replicasProxy, termValidationProxy) + indexShard.indexSettings().isAssignedOnRemoteNode() + ? new ReplicationModeAwareProxy<>( + getReplicationMode(indexShard), + clusterState.getNodes(), + replicasProxy, + termValidationProxy, + indexShard.isRemoteTranslogEnabled() + ) : new FanoutReplicationProxy<>(replicasProxy) ).execute(); } diff --git a/server/src/main/java/org/opensearch/client/node/NodeClient.java b/server/src/main/java/org/opensearch/client/node/NodeClient.java index 6e1bb6ce79349..5780e4c1e648a 100644 --- a/server/src/main/java/org/opensearch/client/node/NodeClient.java +++ b/server/src/main/java/org/opensearch/client/node/NodeClient.java @@ -39,6 +39,7 @@ import org.opensearch.client.Client; import org.opensearch.client.support.AbstractClient; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -53,8 +54,9 @@ /** * Client that executes actions on the local node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeClient extends AbstractClient { private DynamicActionRegistry actionRegistry; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java new file mode 100644 index 0000000000000..b317b0d362825 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java @@ -0,0 +1,110 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Identifies a specific version of ClusterState at a node. 
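Given the fields of the class defined just below, a brief usage sketch of the in-sync check (localClusterState and response are assumed to be in scope):

// Equality covers cluster name, cluster UUID, term and version.
ClusterStateTermVersion local = new ClusterStateTermVersion(localClusterState);
boolean inSync = local.equals(response.getClusterStateTermVersion());
// GetTermVersionResponse.matches(localClusterState) performs the same comparison.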
+ */ +public class ClusterStateTermVersion implements Writeable { + + private final ClusterName clusterName; + private final String clusterUUID; + private final long term; + private final long version; + + public ClusterStateTermVersion(ClusterName clusterName, String clusterUUID, long term, long version) { + this.clusterName = clusterName; + this.clusterUUID = clusterUUID; + this.term = term; + this.version = version; + } + + public ClusterStateTermVersion(StreamInput in) throws IOException { + this.clusterName = new ClusterName(in); + this.clusterUUID = in.readString(); + this.term = in.readLong(); + this.version = in.readLong(); + } + + public ClusterStateTermVersion(ClusterState state) { + this.clusterName = state.getClusterName(); + this.clusterUUID = state.metadata().clusterUUID(); + this.term = state.term(); + this.version = state.version(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeString(clusterUUID); + out.writeLong(term); + out.writeLong(version); + } + + public ClusterName getClusterName() { + return clusterName; + } + + public String getClusterUUID() { + return clusterUUID; + } + + public long getTerm() { + return term; + } + + public long getVersion() { + return version; + } + + @Override + public String toString() { + return "ClusterStateTermVersion{" + + "clusterName=" + + clusterName + + ", clusterUUID='" + + clusterUUID + + '\'' + + ", term=" + + term + + ", version=" + + version + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ClusterStateTermVersion that = (ClusterStateTermVersion) o; + + if (term != that.term) return false; + if (version != that.version) return false; + if (!clusterName.equals(that.clusterName)) return false; + return clusterUUID.equals(that.clusterUUID); + } + + @Override + public int hashCode() { + int result = clusterName.hashCode(); + result = 31 * result + clusterUUID.hashCode(); + result = 31 * result + (int) (term ^ (term >>> 32)); + result = 31 * result + (int) (version ^ (version >>> 32)); + return result; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index bc365b9872037..5d896e392e6bc 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -215,7 +215,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); - if (remoteDN.isEmpty()) { + if (remoteDN.isEmpty() && node.isRemoteStoreNode()) { // This is hit only on cases where we encounter first remote node logger.info("Updating system repository now for remote store"); repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index f117d1a4a11a2..451871b10d5eb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,8 +88,10 @@ import org.opensearch.index.mapper.MapperService; import 
org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.remote.RemoteStorePathResolver; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; +import org.opensearch.index.remote.RemoteStorePathStrategyResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -113,6 +115,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -170,7 +173,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathResolver remoteStorePathResolver; + private final RemoteStorePathStrategyResolver remoteStorePathStrategyResolver; public MetadataCreateIndexService( final Settings settings, @@ -203,8 +206,8 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); - remoteStorePathResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathResolver(clusterService.getClusterSettings()) + remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings()) : null; } @@ -553,11 +556,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - - if (remoteStorePathResolver != null) { - String pathType = remoteStorePathResolver.resolveType().toString(); - tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, Map.of(RemoteStorePathType.NAME, pathType)); - } + addRemoteStorePathStrategyInCustomData(tmpImdBuilder, true); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -566,6 +565,37 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( return tempMetadata; } + /** + * Adds the remote store path type information to the custom data of index metadata. + * + * @param tmpImdBuilder index metadata builder. + * @param assertNullOldType flag to verify that the old remote store path type is null + */ + public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { + if (remoteStorePathStrategyResolver != null) { + // It is possible that remote custom data exists already. In such cases, we need to only update the path type + // in the remote store custom data map. + Map existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + Map remoteCustomData = existingRemoteCustomData == null + ? new HashMap<>() + : new HashMap<>(existingRemoteCustomData); + // Determine the new path strategy using the remoteStorePathStrategyResolver. 
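+ // Illustrative note, not from the patch: after the two puts below, remoteCustomData + // may look like {path_type=HASHED_PREFIX, path_hash_algorithm=FNV_1A}; the enum values + // shown here are assumptions for the example.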
+ RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get(); + String oldPathType = remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name()); + String oldHashAlgorithm = remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name()); + assert !assertNullOldType || (Objects.isNull(oldPathType) && Objects.isNull(oldHashAlgorithm)); + logger.trace( + () -> new ParameterizedMessage( + "Added newPathStrategy={}, replaced oldPathType={} oldHashAlgorithm={}", + newPathStrategy, + oldPathType, + oldHashAlgorithm + ) + ); + tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); + } + } + private ClusterState applyCreateIndexRequestWithV1Templates( final ClusterState currentState, final CreateIndexClusterStateUpdateRequest request, diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index 5375910c57579..6702db4b43e91 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -30,9 +30,9 @@ public class AllocationConstraints { public AllocationConstraints() { this.constraints = new HashMap<>(); - this.constraints.putIfAbsent(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); - this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); - this.constraints.putIfAbsent(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached())); + this.constraints.put(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); + this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached(0.0f))); } public void updateAllocationConstraint(String constraint, boolean enable) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index ae2d4a0926194..08fe8f92d1f80 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -28,6 +28,11 @@ public class ConstraintTypes { */ public final static String CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID = "cluster.primary.shard.balance.constraint"; + /** + * Defines a cluster constraint which is breached when a node contains more than avg primary shards across all indices + */ + public final static String CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID = "cluster.primary.shard.rebalance.constraint"; + /** * Defines an index constraint which is breached when a node contains more than avg number of shards for an index */ @@ -70,14 +75,14 @@ public static Predicate isPerIndexPrimaryShardsPerN } /** - * Defines a predicate which returns true when a node contains more than average number of primary shards. This - * constraint is used in weight calculation during allocation only. 
When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT} - * is assigned to node resulting in lesser chances of node being selected as allocation target + * Defines a predicate which returns true when a node contains more than average number of primary shards with added buffer. This + * constraint is used in weight calculation during both allocation and rebalance. When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT} + * is assigned to the node, resulting in lesser chances of the node being selected as an allocation/rebalance target */ - public static Predicate isPrimaryShardsPerNodeBreached() { + public static Predicate isPrimaryShardsPerNodeBreached(float buffer) { return (params) -> { int primaryShardCount = params.getNode().numPrimaryShards(); - int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode()); + int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode() * (1 + buffer)); return primaryShardCount >= allowedPrimaryShardCount; }; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java index a4036ec47ec0e..2c2138af18abc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java @@ -14,8 +14,10 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPrimaryShardsPerNodeBreached; /** * Constraints applied during rebalancing round; specify conditions which, if breached, reduce the @@ -27,9 +29,13 @@ public class RebalanceConstraints { private Map constraints; - public RebalanceConstraints() { + public RebalanceConstraints(RebalanceParameter rebalanceParameter) { this.constraints = new HashMap<>(); - this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put( + CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID, + new Constraint(isPrimaryShardsPerNodeBreached(rebalanceParameter.getPreferPrimaryBalanceBuffer())) + ); } public void updateRebalanceConstraint(String constraint, boolean enable) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java new file mode 100644 index 0000000000000..35fbaede93ba3 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
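A worked example for the buffered predicate above (the numbers are illustrative, not from the patch): with 55 primaries spread over 10 nodes and the default buffer of 0.10,

float avg = 55 / 10.0f;                           // avgPrimaryShardsPerNode() = 5.5
int allowed = (int) Math.ceil(avg * (1 + 0.10f)); // ceil(6.05) = 7
// A node holding 7 or more primaries breaches the constraint and is down-weighted.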
+ */ + +package org.opensearch.cluster.routing.allocation; + +/** + * RebalanceConstraint Params + */ +public class RebalanceParameter { + private float preferPrimaryBalanceBuffer; + + public RebalanceParameter(float preferPrimaryBalanceBuffer) { + this.preferPrimaryBalanceBuffer = preferPrimaryBalanceBuffer; + } + + public float getPreferPrimaryBalanceBuffer() { + return preferPrimaryBalanceBuffer; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 41ace0e7661fe..b2443490dd973 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.routing.allocation.ConstraintTypes; import org.opensearch.cluster.routing.allocation.MoveDecision; import org.opensearch.cluster.routing.allocation.RebalanceConstraints; +import org.opensearch.cluster.routing.allocation.RebalanceParameter; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.common.inject.Inject; @@ -61,6 +62,7 @@ import java.util.Set; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID; @@ -145,10 +147,29 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + public static final Setting PREFER_PRIMARY_SHARD_REBALANCE = Setting.boolSetting( + "cluster.routing.allocation.rebalance.primary.enable", + false, + Property.Dynamic, + Property.NodeScope + ); + + public static final Setting PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting( + "cluster.routing.allocation.rebalance.primary.buffer", + 0.10f, + 0.0f, + Property.Dynamic, + Property.NodeScope + ); + private volatile boolean movePrimaryFirst; private volatile ShardMovementStrategy shardMovementStrategy; private volatile boolean preferPrimaryShardBalance; + private volatile boolean preferPrimaryShardRebalance; + private volatile float preferPrimaryShardRebalanceBuffer; + private volatile float indexBalanceFactor; + private volatile float shardBalanceFactor; private volatile WeightFunction weightFunction; private volatile float threshold; @@ -158,14 +179,21 @@ public BalancedShardsAllocator(Settings settings) { @Inject public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { - setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings)); + setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings)); + updateWeightFunction(); setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); + setPreferPrimaryShardRebalance(PREFER_PRIMARY_SHARD_REBALANCE.get(settings)); 
setShardMovementStrategy(SHARD_MOVEMENT_STRATEGY_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVEMENT_STRATEGY_SETTING, this::setShardMovementStrategy); - clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); + clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, this::updateIndexBalanceFactor); + clusterSettings.addSettingsUpdateConsumer(SHARD_BALANCE_FACTOR_SETTING, this::updateShardBalanceFactor); + clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); + clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } @@ -190,8 +218,35 @@ private void setShardMovementStrategy(ShardMovementStrategy shardMovementStrateg } } - private void setWeightFunction(float indexBalance, float shardBalanceFactor) { - weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); + private void setIndexBalanceFactor(float indexBalanceFactor) { + this.indexBalanceFactor = indexBalanceFactor; + } + + private void setShardBalanceFactor(float shardBalanceFactor) { + this.shardBalanceFactor = shardBalanceFactor; + } + + private void setPreferPrimaryShardRebalanceBuffer(float preferPrimaryShardRebalanceBuffer) { + this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardRebalanceBuffer; + } + + private void updateIndexBalanceFactor(float indexBalanceFactor) { + this.indexBalanceFactor = indexBalanceFactor; + updateWeightFunction(); + } + + private void updateShardBalanceFactor(float shardBalanceFactor) { + this.shardBalanceFactor = shardBalanceFactor; + updateWeightFunction(); + } + + private void updatePreferPrimaryShardBalanceBuffer(float preferPrimaryShardBalanceBuffer) { + this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardBalanceBuffer; + updateWeightFunction(); + } + + private void updateWeightFunction() { + weightFunction = new WeightFunction(this.indexBalanceFactor, this.shardBalanceFactor, this.preferPrimaryShardRebalanceBuffer); } /** @@ -205,6 +260,11 @@ private void setPreferPrimaryShardBalance(boolean preferPrimaryShardBalance) { this.weightFunction.updateRebalanceConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance); } + private void setPreferPrimaryShardRebalance(boolean preferPrimaryShardRebalance) { + this.preferPrimaryShardRebalance = preferPrimaryShardRebalance; + this.weightFunction.updateRebalanceConstraint(CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID, preferPrimaryShardRebalance); + } + private void setThreshold(float threshold) { this.threshold = threshold; } @@ -221,7 +281,8 @@ public void allocate(RoutingAllocation allocation) { shardMovementStrategy, weightFunction, threshold, - preferPrimaryShardBalance + preferPrimaryShardBalance, + preferPrimaryShardRebalance ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -242,7 +303,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f shardMovementStrategy, weightFunction, threshold, - preferPrimaryShardBalance + preferPrimaryShardBalance, + preferPrimaryShardRebalance ); AllocateUnassignedDecision 
allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -348,7 +410,7 @@ static class WeightFunction { private AllocationConstraints constraints; private RebalanceConstraints rebalanceConstraints; - WeightFunction(float indexBalance, float shardBalance) { + WeightFunction(float indexBalance, float shardBalance, float preferPrimaryBalanceBuffer) { float sum = indexBalance + shardBalance; if (sum <= 0.0f) { throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); @@ -357,8 +419,9 @@ static class WeightFunction { theta1 = indexBalance / sum; this.indexBalance = indexBalance; this.shardBalance = shardBalance; + RebalanceParameter rebalanceParameter = new RebalanceParameter(preferPrimaryBalanceBuffer); this.constraints = new AllocationConstraints(); - this.rebalanceConstraints = new RebalanceConstraints(); + this.rebalanceConstraints = new RebalanceConstraints(rebalanceParameter); // Enable index shard per node breach constraint updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true); } @@ -495,7 +558,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 45f64a5b29b04..ec25d041bda43 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -61,6 +61,7 @@ public class LocalShardsBalancer extends ShardsBalancer { private final ShardMovementStrategy shardMovementStrategy; private final boolean preferPrimaryBalance; + private final boolean preferPrimaryRebalance; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; @@ -76,7 +77,8 @@ public LocalShardsBalancer( ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, - boolean preferPrimaryBalance + boolean preferPrimaryBalance, + boolean preferPrimaryRebalance ) { this.logger = logger; this.allocation = allocation; @@ -91,6 +93,7 @@ public LocalShardsBalancer( sorter = newNodeSorter(); inEligibleTargetNode = new HashSet<>(); this.preferPrimaryBalance = preferPrimaryBalance; + this.preferPrimaryRebalance = preferPrimaryRebalance; this.shardMovementStrategy = shardMovementStrategy; } @@ -995,13 +998,18 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala continue; } // This is a safety net which prevents un-necessary primary shard relocations from maxNode to minNode when - // doing such relocation wouldn't help in primary balance. + // doing such relocation wouldn't help in primary balance. 
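For context, the two dynamic settings wired up above can be built programmatically; a sketch using the setting names from this patch:

import org.opensearch.common.settings.Settings;

final class RebalanceSettingsExample {
    // Enable node-level primary rebalance with the default 10% buffer.
    static final Settings REBALANCE_SETTINGS = Settings.builder()
        .put("cluster.routing.allocation.rebalance.primary.enable", true)
        .put("cluster.routing.allocation.rebalance.primary.buffer", 0.10f)
        .build();
}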
The condition won't be applicable when we enable node level + primary rebalance if (preferPrimaryBalance == true + && preferPrimaryRebalance == false && shard.primary() && maxNode.numPrimaryShards(shard.getIndexName()) - minNode.numPrimaryShards(shard.getIndexName()) < 2) { continue; } - + // Relax the above condition to a per-node check to allow rebalancing to attain global balance + if (preferPrimaryRebalance == true && shard.primary() && maxNode.numPrimaryShards() - minNode.numPrimaryShards() < 2) { + continue; + } final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); maxNode.removeShard(shard); long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java index c54536e9c46e2..763594ed52977 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java @@ -33,6 +33,7 @@ package org.opensearch.common.blobstore; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.ArrayList; import java.util.Collections; @@ -42,8 +43,9 @@ /** * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BlobPath implements Iterable { private static final String SEPARATOR = "/"; diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java index a18ca8b9d5c39..c41641921c822 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -9,6 +9,7 @@ package org.opensearch.common.blobstore; import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.crypto.CryptoHandler; import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.crypto.CryptoRegistryException; @@ -65,6 +66,15 @@ public BlobContainer blobContainer(BlobPath path) { return new EncryptedBlobContainer<>(blobContainer, cryptoHandler); } + /** + * Reload blobstore metadata + * @param repositoryMetadata new repository metadata + */ + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + blobStore.reload(repositoryMetadata); + } + /** * Retrieves statistics about the BlobStore. Delegates the call to the underlying BlobStore's stats() method. 
* diff --git a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java index 369313f9f93f4..68e1cdf6139e2 100644 --- a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java +++ b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java @@ -32,11 +32,14 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Listener for removing an element from the cache * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi @FunctionalInterface public interface RemovalListener { void onRemoval(RemovalNotification notification); diff --git a/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java b/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java index 0a98542a05bb7..df698112c60d1 100644 --- a/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java +++ b/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java @@ -8,6 +8,7 @@ package org.opensearch.common.cache.policy; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -62,7 +63,10 @@ public void writeToNoId(StreamOutput out) throws IOException { /** * A class containing information needed for all cache policies * to decide whether to admit a given value. + * + * @opensearch.experimental */ + @ExperimentalApi public static class PolicyValues implements Writeable { final long tookTimeNanos; // More values can be added here as they're needed for future policies diff --git a/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java b/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java index 35e28707d1ca3..46a8ed5a72ccf 100644 --- a/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java +++ b/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java @@ -8,10 +8,15 @@ package org.opensearch.common.cache.serializer; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Defines an interface for serializers, to be used by pluggable caches. * T is the class of the original object, and U is the serialized class. + * + * @opensearch.experimental */ +@ExperimentalApi public interface Serializer { /** * Serializes an object. diff --git a/server/src/main/java/org/opensearch/common/hash/FNV1a.java b/server/src/main/java/org/opensearch/common/hash/FNV1a.java new file mode 100644 index 0000000000000..cab28d0f2d68f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/hash/FNV1a.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import java.nio.charset.StandardCharsets; + +/** + * Provides hashing function using FNV1a hash function. @see FNV author's website. 
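A quick sanity check for the FNV1a helper whose body follows (the expected constants come directly from the FNV-1a definition: zero input bytes means zero mix steps, so each hash returns its offset basis):

import org.opensearch.common.hash.FNV1a;

public class FNV1aCheck {
    public static void main(String[] args) {
        System.out.println(FNV1a.hash32("") == 0x811c9dc5L);          // true: 32-bit offset basis
        System.out.println(FNV1a.hash64("") == 0xcbf29ce484222325L);  // true: 64-bit offset basis
        // Deterministic: equal inputs always hash equal, which is what stable path hashing relies on.
        System.out.println(FNV1a.hash64("index-uuid/0") == FNV1a.hash64("index-uuid/0")); // true
    }
}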
+ * 32 bit Java port of http://www.isthe.com/chongo/src/fnv/hash_32a.c + * 64 bit Java port of http://www.isthe.com/chongo/src/fnv/hash_64a.c + * + * @opensearch.internal + */ +public class FNV1a { + private static final long FNV_OFFSET_BASIS_32 = 0x811c9dc5L; + private static final long FNV_PRIME_32 = 0x01000193L; + + private static final long FNV_OFFSET_BASIS_64 = 0xcbf29ce484222325L; + private static final long FNV_PRIME_64 = 0x100000001b3L; + + // FNV-1a hash computation for 32-bit hash + public static long hash32(String input) { + long hash = FNV_OFFSET_BASIS_32; + byte[] bytes = input.getBytes(StandardCharsets.UTF_8); + for (byte b : bytes) { + hash ^= (b & 0xFF); + hash *= FNV_PRIME_32; + } + return hash; + } + + // FNV-1a hash computation for 64-bit hash + public static long hash64(String input) { + long hash = FNV_OFFSET_BASIS_64; + byte[] bytes = input.getBytes(StandardCharsets.UTF_8); + for (byte b : bytes) { + hash ^= (b & 0xFF); + hash *= FNV_PRIME_64; + } + return hash; + } +} diff --git a/server/src/main/java/org/opensearch/common/inject/Binder.java b/server/src/main/java/org/opensearch/common/inject/Binder.java index a733a19608ac1..a9d16becfb5ab 100644 --- a/server/src/main/java/org/opensearch/common/inject/Binder.java +++ b/server/src/main/java/org/opensearch/common/inject/Binder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.binder.AnnotatedBindingBuilder; import org.opensearch.common.inject.binder.AnnotatedConstantBindingBuilder; import org.opensearch.common.inject.binder.LinkedBindingBuilder; @@ -198,8 +199,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @author kevinb@google.com (Kevin Bourrillion) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Binder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Binding.java b/server/src/main/java/org/opensearch/common/inject/Binding.java index 53d02e37502af..a42237697a1d2 100644 --- a/server/src/main/java/org/opensearch/common/inject/Binding.java +++ b/server/src/main/java/org/opensearch/common/inject/Binding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.spi.BindingScopingVisitor; import org.opensearch.common.inject.spi.BindingTargetVisitor; import org.opensearch.common.inject.spi.Element; @@ -69,8 +70,9 @@ * @author crazybob@google.com (Bob Lee) * @author jessewilson@google.com (Jesse Wilson) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Binding extends Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java b/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java index 4379a93482560..e3a32754a1bdb 100644 --- a/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java +++ b/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.internal.Errors; import org.opensearch.common.inject.spi.Message; @@ -46,8 +47,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ConfigurationException extends RuntimeException { private final Set messages; private 
Object partialValue = null; diff --git a/server/src/main/java/org/opensearch/common/inject/Injector.java b/server/src/main/java/org/opensearch/common/inject/Injector.java index ff212c6313371..772578dd6bb2c 100644 --- a/server/src/main/java/org/opensearch/common/inject/Injector.java +++ b/server/src/main/java/org/opensearch/common/inject/Injector.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + import java.util.List; /** @@ -54,8 +56,9 @@ * @author crazybob@google.com (Bob Lee) * @author jessewilson@google.com (Jesse Wilson) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Injector { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Key.java b/server/src/main/java/org/opensearch/common/inject/Key.java index cd305353a555d..32f168d18e523 100644 --- a/server/src/main/java/org/opensearch/common/inject/Key.java +++ b/server/src/main/java/org/opensearch/common/inject/Key.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.internal.Annotations; import org.opensearch.common.inject.internal.MoreTypes; import org.opensearch.common.inject.internal.ToStringBuilder; @@ -59,8 +60,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Key { private final AnnotationStrategy annotationStrategy; diff --git a/server/src/main/java/org/opensearch/common/inject/MembersInjector.java b/server/src/main/java/org/opensearch/common/inject/MembersInjector.java index 891762375d5a2..872ae883e246b 100644 --- a/server/src/main/java/org/opensearch/common/inject/MembersInjector.java +++ b/server/src/main/java/org/opensearch/common/inject/MembersInjector.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * Injects dependencies into the fields and methods on instances of type {@code T}. Ignores the * presence or absence of an injectable constructor. @@ -38,8 +40,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface MembersInjector { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Module.java b/server/src/main/java/org/opensearch/common/inject/Module.java index b1fc031192ea0..e66044ff26c40 100644 --- a/server/src/main/java/org/opensearch/common/inject/Module.java +++ b/server/src/main/java/org/opensearch/common/inject/Module.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * A module contributes configuration information, typically interface * bindings, which will be used to create an {@link Injector}. A Guice-based @@ -43,8 +45,9 @@ * Use scope and binding annotations on these methods to configure the * bindings. 
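Binder, Module, and Injector being @PublicApi means plugin code can use the classic Guice-style wiring directly. A minimal sketch, assuming the repackaged org.opensearch.common.inject keeps upstream Guice's AbstractModule and Guice.createInjector entry points; CacheService and InMemoryCacheService are invented for illustration:

import org.opensearch.common.inject.AbstractModule;
import org.opensearch.common.inject.Guice;
import org.opensearch.common.inject.Injector;

interface CacheService {
    String describe();
}

class InMemoryCacheService implements CacheService {
    @Override
    public String describe() {
        return "in-memory";
    }
}

class CacheModule extends AbstractModule {
    @Override
    protected void configure() {
        // contribute a binding: requests for CacheService are satisfied by InMemoryCacheService
        bind(CacheService.class).to(InMemoryCacheService.class);
    }
}

class ModuleDemo {
    public static void main(String[] args) {
        Injector injector = Guice.createInjector(new CacheModule());
        CacheService cache = injector.getInstance(CacheService.class);
        System.out.println(cache.describe()); // prints "in-memory"
    }
}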
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Module { /** diff --git a/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java b/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java index 87635880e29d8..2b6b2e0aad146 100644 --- a/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java +++ b/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.binder.AnnotatedElementBuilder; /** @@ -38,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface PrivateBinder extends Binder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Provider.java b/server/src/main/java/org/opensearch/common/inject/Provider.java index 97f9e9ae503cd..988143b328828 100644 --- a/server/src/main/java/org/opensearch/common/inject/Provider.java +++ b/server/src/main/java/org/opensearch/common/inject/Provider.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * An object capable of providing instances of type {@code T}. Providers are used in numerous ways * by Guice: @@ -50,8 +52,9 @@ * @param the type of object this provides * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Provider { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Scope.java b/server/src/main/java/org/opensearch/common/inject/Scope.java index a21495f522d5e..6fb9f560981ef 100644 --- a/server/src/main/java/org/opensearch/common/inject/Scope.java +++ b/server/src/main/java/org/opensearch/common/inject/Scope.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * A scope is a level of visibility that instances provided by Guice may have. * By default, an instance created by the {@link Injector} has no scope, @@ -42,8 +44,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Scope { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Stage.java b/server/src/main/java/org/opensearch/common/inject/Stage.java index d5996bd1363e9..fbb6e389ef43f 100644 --- a/server/src/main/java/org/opensearch/common/inject/Stage.java +++ b/server/src/main/java/org/opensearch/common/inject/Stage.java @@ -29,13 +29,16 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * The stage we're running in. 
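Looping back to the FNV1a helper that opened this section: each byte of the input's UTF-8 encoding is XOR-folded into the accumulator and then multiplied by the FNV prime, so equal inputs always yield equal digests. A small worked sketch; the demo class and input string are invented, and FNV1a is assumed importable from wherever the new file lives (its package declaration falls outside this excerpt):

public class FNV1aDemo {
    public static void main(String[] args) {
        // hash32: seed 0x811c9dc5, prime 0x01000193; hash64: seed 0xcbf29ce484222325, prime 0x100000001b3
        long h32 = FNV1a.hash32("shard-0");
        long h64 = FNV1a.hash64("shard-0");
        // deterministic across JVMs and nodes: recomputing yields the same values
        System.out.printf("fnv1a-32=%d fnv1a-64=%d%n", h32, h64);
    }
}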
* * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum Stage { /** diff --git a/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java b/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java index f0cca2990b407..8ac04e5d0ac1d 100644 --- a/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.internal.MoreTypes; import org.opensearch.common.inject.util.Types; @@ -77,8 +78,9 @@ * @author crazybob@google.com (Bob Lee) * @author jessewilson@google.com (Jesse Wilson) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TypeLiteral { final Class rawType; diff --git a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java index bcd593a8cbf7b..5c3c6eac9bd3a 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.Annotation; /** @@ -36,8 +38,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnnotatedBindingBuilder extends LinkedBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java index 42c208a2b37ea..71ea1ba0a5207 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.Annotation; /** @@ -36,8 +38,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnnotatedConstantBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java index f2d0916790b6b..54fcb915d83c9 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.Annotation; /** @@ -37,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnnotatedElementBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java index 595c477d3e28b..feaee3ed59f46 100644 --- 
a/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java @@ -29,11 +29,14 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + /** * Binds to a constant value. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ConstantBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java index 2368fef16471c..e8c4b197253b5 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; import org.opensearch.common.inject.TypeLiteral; @@ -38,8 +39,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LinkedBindingBuilder extends ScopedBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java index 73dd4414f17a2..c360b9571bc4a 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Scope; import java.lang.annotation.Annotation; @@ -38,8 +39,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ScopedBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java b/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java index 21bb63cfef097..4e254f8641350 100644 --- a/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java +++ b/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java @@ -29,13 +29,16 @@ package org.opensearch.common.inject.matcher; +import org.opensearch.common.annotation.PublicApi; + /** * Returns {@code true} or {@code false} for a given input. * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Matcher { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java index d7c7d9d65051d..b4fbdf2fdb72b 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Scope; import java.lang.annotation.Annotation; @@ -40,8 +41,9 @@ * {@code return null} if no return type is needed. 
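The builder interfaces annotated above are the steps of the binding EDSL: bind() returns an AnnotatedBindingBuilder, which narrows to a LinkedBindingBuilder and finally a ScopedBindingBuilder, while bindConstant() goes through the constant builders. A sketch inside a module; TaskQueue and InMemoryTaskQueue are invented, and Names, Singleton, and bindConstant() are assumed to match upstream Guice, which this package mirrors:

import org.opensearch.common.inject.AbstractModule;
import org.opensearch.common.inject.Singleton;
import org.opensearch.common.inject.name.Names;

interface TaskQueue {}

class InMemoryTaskQueue implements TaskQueue {}

class QueueModule extends AbstractModule {
    @Override
    protected void configure() {
        // AnnotatedConstantBindingBuilder -> ConstantBindingBuilder
        bindConstant().annotatedWith(Names.named("queue.capacity")).to(128);
        // AnnotatedBindingBuilder -> LinkedBindingBuilder -> ScopedBindingBuilder
        bind(TaskQueue.class).to(InMemoryTaskQueue.class).in(Singleton.class);
    }
}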
* @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BindingScopingVisitor { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java index 91df812b58ac4..9543e731308bd 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; + /** * Visits each of the strategies used to find an instance to satisfy an injection. * @@ -36,8 +38,9 @@ * {@code return null} if no return type is needed. * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BindingTargetVisitor { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java index 997bf78234fd1..8eec6cefe53c7 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import java.util.Set; @@ -40,8 +41,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ConstructorBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java index e8d6b346f8596..a07da68a88931 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; @@ -41,8 +42,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ConvertedConstantBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java b/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java index be1336ad0f297..e541ba0b73bf5 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Key; import java.util.HashSet; @@ -47,8 +48,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Dependency { private final InjectionPoint injectionPoint; private final Key key; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/Element.java b/server/src/main/java/org/opensearch/common/inject/spi/Element.java index 660aca1bd45ab..58a696fb7ffa9 100644 --- 
a/server/src/main/java/org/opensearch/common/inject/spi/Element.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/Element.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; /** @@ -43,8 +44,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java index d415560fc03c8..b88f11b9378aa 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; /** @@ -38,8 +39,9 @@ * {@code return null} if no return type is needed. * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ElementVisitor { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java index d2563bc2728cd..6c1679432abe5 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Binding; @@ -38,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ExposedBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java b/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java index 7a760d2b84e9f..878e919cda4cc 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; + /** * Listens for injections into instances of type {@code I}. Useful for performing further * injections, post-injection initialization, and more. 
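InjectionListener is the post-injection callback of this SPI; a TypeListener (annotated later in this patch) registers it for each encountered type. A sketch with an invented Service interface, assuming hear() and register() keep their upstream Guice signatures:

import org.opensearch.common.inject.TypeLiteral;
import org.opensearch.common.inject.spi.InjectionListener;
import org.opensearch.common.inject.spi.TypeEncounter;
import org.opensearch.common.inject.spi.TypeListener;

interface Service {
    void start();
}

class StartupListener implements TypeListener {
    @Override
    public <I> void hear(TypeLiteral<I> type, TypeEncounter<I> encounter) {
        if (Service.class.isAssignableFrom(type.getRawType())) {
            // runs after the injectee's fields and methods have been injected
            encounter.register((InjectionListener<I>) injectee -> ((Service) injectee).start());
        }
    }
}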
@@ -37,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface InjectionListener { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java index c88b2281107ed..542cbd780a8b6 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.ConfigurationException; import org.opensearch.common.inject.Inject; import org.opensearch.common.inject.Key; @@ -66,8 +67,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InjectionPoint { private final boolean optional; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java b/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java index 6ce5febbb6711..a5faca6264424 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.ConfigurationException; import org.opensearch.common.inject.TypeLiteral; @@ -46,8 +47,9 @@ * @author mikeward@google.com (Mike Ward) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InjectionRequest implements Element { private final Object source; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java index fd7c1303ed6fc..f73b284ae2e8c 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import java.util.Set; @@ -39,8 +40,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface InstanceBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java index 10b270e499603..01da905f8da47 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; @@ -38,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LinkedKeyBinding extends Binding { /** diff --git 
a/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java b/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java index 1f652708de875..b8a07146812c1 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.MembersInjector; import org.opensearch.common.inject.TypeLiteral; @@ -45,8 +46,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MembersInjectorLookup implements Element { private final Object source; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/Message.java b/server/src/main/java/org/opensearch/common/inject/spi/Message.java index 78829e82c150e..13184a7d82f0c 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/Message.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/Message.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.internal.Errors; import org.opensearch.common.inject.internal.SourceProvider; @@ -50,8 +51,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Message implements Element { private final String message; private final Throwable cause; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java b/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java index e4d86a356cd53..6330cbe33de58 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Injector; import org.opensearch.common.inject.Key; @@ -42,8 +43,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface PrivateElements extends Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java index 0a63fefc0a9e9..dd55e9805843f 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; @@ -40,8 +41,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ProviderBinding> extends Binding { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java index 
654f40e627e4b..25bac3b5df34c 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Provider; @@ -41,8 +42,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ProviderInstanceBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java index 6f1ae8f2b9a03..f68e1662ad124 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; @@ -40,8 +41,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ProviderKeyBinding extends Binding { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java index 16060ddd3e222..6afe7346a1431 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; @@ -45,8 +46,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ProviderLookup implements Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java index 7a619456e06e3..ca03f4291a062 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Scope; @@ -46,8 +47,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ScopeBinding implements Element { private final Object source; private final Class annotationType; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java b/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java index 494e35e6c4490..c426639d85cab 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java @@ 
-29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.ConfigurationException; @@ -45,8 +46,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class StaticInjectionRequest implements Element { private final Object source; private final Class type; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java index 93a0f607ddc27..2386c1e528db6 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.TypeLiteral; /** @@ -37,8 +38,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TypeConverter { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java index 00b8c7c013b5a..59311de0fb3f5 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.matcher.Matcher; @@ -45,8 +46,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TypeConverterBinding implements Element { private final Object source; private final Matcher> typeMatcher; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java index e06751668c0f1..61756a5bcad95 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.MembersInjector; import org.opensearch.common.inject.Provider; @@ -43,8 +44,9 @@ * @param the injectable type encountered * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @SuppressWarnings("overloads") public interface TypeEncounter { diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java index fd7004aa80df0..3157fa15f471b 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.TypeLiteral; /** @@ -43,8 +44,9 @@ * * @since 2.0 * - * @opensearch.internal + * 
@opensearch.api */ +@PublicApi(since = "1.0.0") public interface TypeListener { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java index 505028f09232d..4ddcf3fc11bc1 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.matcher.Matcher; @@ -42,8 +43,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TypeListenerBinding implements Element { private final Object source; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java index 37e40d45cb5a9..56890efdfcd8d 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; /** @@ -38,6 +39,7 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface UntargettedBinding<T> extends Binding<T> {} diff --git a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java index f9a87b9e74214..ec2cfde84ca5f 100644 --- a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java +++ b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java @@ -84,8 +84,10 @@ public DelegatingCacheHelper getDelegatingCacheHelper() { /** * Wraps existing IndexReader cache helper which internally provides a way to wrap CacheKey. - * @opensearch.internal + * + * @opensearch.api */ + @PublicApi(since = "2.13.0") public class DelegatingCacheHelper implements CacheHelper { private final CacheHelper cacheHelper; private final DelegatingCacheKey serializableCacheKey; @@ -113,7 +115,10 @@ public void addClosedListener(ClosedListener listener) { /** * Wraps internal IndexReader.CacheKey and attaches a uniqueId to it which can eventually be used instead of * the object itself for serialization purposes. + * + * @opensearch.api */ + @PublicApi(since = "2.13.0") public class DelegatingCacheKey { private final CacheKey cacheKey; private final String uniqueId; diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 359facdce633b..94d44d5b35d74 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -32,13 +32,16 @@ package org.opensearch.common.metrics; +import org.opensearch.common.annotation.PublicApi; + import java.util.concurrent.atomic.LongAdder; /** * An average metric for tracking. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MeanMetric implements Metric { private final LongAdder counter = new LongAdder();
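MeanMetric, promoted to @PublicApi above, is a simple LongAdder-backed running average. A usage sketch; the latency figures are invented, and inc/mean are taken from the class's existing accessors:

import org.opensearch.common.metrics.MeanMetric;

public class LatencyStatsDemo {
    public static void main(String[] args) {
        MeanMetric latency = new MeanMetric();
        latency.inc(12); // record a 12 ms observation
        latency.inc(30); // record a 30 ms observation
        // mean() divides the accumulated sum by the observation count: (12 + 30) / 2 = 21.0
        System.out.println(latency.mean());
    }
}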
diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index d0f5dd9e4581d..bb8da190a6f35 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -55,6 +55,8 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.RawTaskStatus; @@ -74,7 +76,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * A module to handle registering and binding all network related classes. @@ -173,13 +177,31 @@ public NetworkModule( ClusterSettings clusterSettings, Tracer tracer, List transportInterceptors, - Collection<SecureTransportSettingsProvider> secureTransportSettingsProvider + Collection<SecureSettingsFactory> secureSettingsFactories ) { this.settings = settings; - if (secureTransportSettingsProvider.size() > 1) { + final Collection<SecureTransportSettingsProvider> secureTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureTransportSettingsProviders.size() > 1) { + throw new IllegalArgumentException( + "there is more than one secure transport settings provider: " + secureTransportSettingsProviders + ); + } + + final Collection<SecureHttpTransportSettingsProvider> secureHttpTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureHttpTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureHttpTransportSettingsProviders.size() > 1) { throw new IllegalArgumentException( - "there is more than one secure transport settings provider: " + secureTransportSettingsProvider + "there is more than one secure HTTP transport settings provider: " + secureHttpTransportSettingsProviders ); } @@ -213,9 +235,9 @@ public NetworkModule( registerTransport(entry.getKey(), entry.getValue()); } - // Register any secure transports if available - if (secureTransportSettingsProvider.isEmpty() == false) { - final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProvider.iterator().next(); + // Register any HTTP secure transports if available + if (secureHttpTransportSettingsProviders.isEmpty() == false) { + final SecureHttpTransportSettingsProvider secureSettingProvider = secureHttpTransportSettingsProviders.iterator().next(); final Map<String, Supplier<HttpServerTransport>> secureHttpTransportFactory = plugin.getSecureHttpTransports( settings, @@ -233,6 +255,11 @@ for (Map.Entry<String, Supplier<HttpServerTransport>> entry : secureHttpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); } + } + + // Register any secure transports if available + if (secureTransportSettingsProviders.isEmpty() == false) { + final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProviders.iterator().next(); final Map<String, Supplier<Transport>> secureTransportFactory = plugin.getSecureTransports( settings, diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 7c8afb6b5c1b5..1b529a3b2bef1 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -117,6 +117,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.analysis.HunspellService; import org.opensearch.indices.breaker.BreakerSettings; @@ -251,7 +252,9 @@ public void apply(Settings value, Settings current, Settings previous) { AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER, BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE, + BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE, BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, @@ -297,7 +300,6 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, - RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -706,21 +708,26 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, + + // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT, IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT, IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT, - IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, // Concurrent segment search settings SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, -
SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, + + RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 8633cf1fe25ea..bdfce72d106d3 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -12,9 +12,11 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import java.util.List; + /** * Utility class to manage feature flags. Feature flags are system properties that must be set on the JVM. - * These are used to gate the visibility/availability of incomplete features. Fore more information, see + * These are used to gate the visibility/availability of incomplete features. For more information, see * https://featureflags.io/feature-flag-introduction/ * * @opensearch.internal @@ -65,11 +67,54 @@ public class FeatureFlags { */ public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; + public static final Setting<Boolean> REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_STORE_MIGRATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + + public static final Setting<Boolean> EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); + + public static final Setting<Boolean> IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); + + public static final Setting<Boolean> TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); + + public static final Setting<Boolean> DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( + DATETIME_FORMATTER_CACHING, + true, + Property.NodeScope + ); + + public static final Setting<Boolean> WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( + WRITEABLE_REMOTE_INDEX, + false, + Property.NodeScope + ); + + public static final Setting<Boolean> PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); + + private static final List<Setting<Boolean>> ALL_FEATURE_FLAG_SETTINGS = List.of( + REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, + EXTENSIONS_SETTING, + IDENTITY_SETTING, + TELEMETRY_SETTING, + DATETIME_FORMATTER_CACHING_SETTING, + WRITEABLE_REMOTE_INDEX_SETTING, + PLUGGABLE_CACHE_SETTING + ); /** * Should store the settings from opensearch.yml. */ private static Settings settings; + static { + Settings.Builder settingsBuilder = Settings.builder(); + for (Setting<Boolean> ffSetting : ALL_FEATURE_FLAG_SETTINGS) { + settingsBuilder = settingsBuilder.put(ffSetting.getKey(), ffSetting.getDefault(Settings.EMPTY)); + } + settings = settingsBuilder.build(); + } + /** * This method is responsible for mapping settings from opensearch.yml to the locally stored * settings value. That is used for the existing isEnabled method. * @param openSearchSettings The settings stored in opensearch.yml. */ public static void initializeFeatureFlags(Settings openSearchSettings) { - settings = openSearchSettings; + Settings.Builder settingsBuilder = Settings.builder(); + for (Setting<Boolean> ffSetting : ALL_FEATURE_FLAG_SETTINGS) { + settingsBuilder = settingsBuilder.put( + ffSetting.getKey(), + openSearchSettings.getAsBoolean(ffSetting.getKey(), ffSetting.getDefault(openSearchSettings)) + ); + } + settings = settingsBuilder.build(); }
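Taken together, the static initializer seeds every flag's default and initializeFeatureFlags overlays whatever the operator set in opensearch.yml. A behavior sketch; the flags are from the list above, but the scenario itself is invented:

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;

public class FeatureFlagsDemo {
    public static void main(String[] args) {
        // suppose opensearch.yml opted into telemetry only
        Settings nodeSettings = Settings.builder().put(FeatureFlags.TELEMETRY, true).build();
        FeatureFlags.initializeFeatureFlags(nodeSettings);
        assert FeatureFlags.isEnabled(FeatureFlags.TELEMETRY_SETTING);                  // explicitly enabled
        assert FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING); // default true
        assert FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS_SETTING) == false;        // default false
    }
}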
/** @@ -103,30 +155,4 @@ public static boolean isEnabled(Setting<Boolean> featureFlag) { return featureFlag.getDefault(Settings.EMPTY); } } - - public static final Setting<Boolean> REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( - REMOTE_STORE_MIGRATION_EXPERIMENTAL, - false, - Property.NodeScope - ); - - public static final Setting<Boolean> EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); - - public static final Setting<Boolean> IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); - - public static final Setting<Boolean> TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); - - public static final Setting<Boolean> DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( - DATETIME_FORMATTER_CACHING, - true, - Property.NodeScope - ); - - public static final Setting<Boolean> WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( - WRITEABLE_REMOTE_INDEX, - false, - Property.NodeScope - ); - - public static final Setting<Boolean> PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 41783b89ccc69..f4503ce55e6bc 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -52,7 +53,10 @@ /** * An action that forwards REST requests to an extension + * + * @opensearch.experimental */ +@ExperimentalApi public class RestSendToExtensionAction extends BaseRestHandler { private static final String SEND_TO_EXTENSION_ACTION = "send_to_extension_action"; diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java new file mode 100644 index 0000000000000..3459f1591b633 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -0,0 +1,188 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.NodeAllocationResult; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.collect.Tuple; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.AsyncShardFetch.FetchResult; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Allocates replica shards in batch mode + * + * @opensearch.internal + */ +public abstract class ReplicaShardBatchAllocator extends ReplicaShardAllocator { + + /** + * Process existing recoveries of replicas and see if we need to cancel them if we find a better + * match. Today, a better match is one that can perform a no-op recovery while the previous recovery + * has to copy segment files. + * + * @param allocation the overall routing allocation + * @param shardBatches a list of shard batches to check for existing recoveries + */ + public void processExistingRecoveries(RoutingAllocation allocation, List<List<ShardRouting>> shardBatches) { + List<Runnable> shardCancellationActions = new ArrayList<>(); + // iterate through the batches, each batch needs to be processed together as fetch call should be made for shards from same batch + for (List<ShardRouting> shardBatch : shardBatches) { + List<ShardRouting> eligibleShards = new ArrayList<>(); + List<ShardRouting> ineligibleShards = new ArrayList<>(); + // iterate over shards to check for match for each of those + for (ShardRouting shard : shardBatch) { + if (shard != null && !shard.primary()) { + // need to iterate over all the nodes to find matching shard + if (shouldSkipFetchForRecovery(shard)) { + ineligibleShards.add(shard); + continue; + } + eligibleShards.add(shard); + } + } + AsyncShardFetch.FetchResult<NodeStoreFilesMetadataBatch> shardState = fetchData(eligibleShards, ineligibleShards, allocation); + if (!shardState.hasData()) { + logger.trace("{}: fetching new stores for initializing shard batch", eligibleShards); + continue; // still fetching + } + for (ShardRouting shard : eligibleShards) { + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores = convertToNodeStoreFilesMetadataMap(shard, shardState); + + Runnable cancellationAction = cancelExistingRecoveryForBetterMatch(shard, allocation, nodeShardStores); + if (cancellationAction != null) { + shardCancellationActions.add(cancellationAction); + } + } + } + for (Runnable action : shardCancellationActions) { + action.run(); + } + } + + abstract protected FetchResult<NodeStoreFilesMetadataBatch> fetchData( + List<ShardRouting> eligibleShards, + List<ShardRouting> ineligibleShards, + RoutingAllocation allocation + ); + + @Override + protected FetchResult<TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata> fetchData( + ShardRouting shard, + RoutingAllocation allocation + ) { + logger.error("fetchData for single shard called via batch allocator"); + throw new IllegalStateException("ReplicaShardBatchAllocator should only be used for a batch of shards"); + } + + @Override + public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation, Logger logger) { + return makeAllocationDecision(Collections.singletonList(unassignedShard), allocation, logger).get(unassignedShard); + } + + @Override + public HashMap<ShardRouting, AllocateUnassignedDecision> makeAllocationDecision( + List<ShardRouting> shards, + RoutingAllocation allocation, + Logger logger + ) { + HashMap<ShardRouting, AllocateUnassignedDecision> shardAllocationDecisions = new HashMap<>(); + final boolean explain = allocation.debugDecision(); + List<ShardRouting> eligibleShards = new ArrayList<>(); + List<ShardRouting> ineligibleShards = new ArrayList<>(); + HashMap<ShardRouting, Tuple<Decision, Map<String, NodeAllocationResult>>> nodeAllocationDecisions = new HashMap<>(); + for (ShardRouting shard : shards) { + if (!isResponsibleFor(shard)) { + // this allocator is not responsible for allocating this shard + ineligibleShards.add(shard); + shardAllocationDecisions.put(shard, AllocateUnassignedDecision.NOT_TAKEN); + continue; + } + + Tuple<Decision, Map<String, NodeAllocationResult>> result = canBeAllocatedToAtLeastOneNode(shard, allocation); + Decision allocationDecision = result.v1(); + if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shard))) { + // only return early if we are not in explain mode, or we are in explain mode but we have not + // yet attempted to fetch any shard data + logger.trace("{}: ignoring allocation, can't be allocated on any node", shard); + shardAllocationDecisions.put( + shard, + AllocateUnassignedDecision.no( + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()), + result.v2() != null ? new ArrayList<>(result.v2().values()) : null + ) + ); + continue; + } + // storing the nodeDecisions in nodeAllocationDecisions if the decision is not YES + // so that we don't have to compute the decisions again + nodeAllocationDecisions.put(shard, result); + + eligibleShards.add(shard); + } + + // Do not call fetchData if there are no eligible shards + if (eligibleShards.isEmpty()) { + return shardAllocationDecisions; + } + // only fetch data for eligible shards + final FetchResult<NodeStoreFilesMetadataBatch> shardsState = fetchData(eligibleShards, ineligibleShards, allocation); + + for (ShardRouting unassignedShard : eligibleShards) { + Tuple<Decision, Map<String, NodeAllocationResult>> result = nodeAllocationDecisions.get(unassignedShard); + shardAllocationDecisions.put( + unassignedShard, + getAllocationDecision( + unassignedShard, + allocation, + convertToNodeStoreFilesMetadataMap(unassignedShard, shardsState), + result, + logger + ) + ); + } + return shardAllocationDecisions; + } + + private Map<DiscoveryNode, StoreFilesMetadata> convertToNodeStoreFilesMetadataMap( + ShardRouting unassignedShard, + FetchResult<NodeStoreFilesMetadataBatch> data + ) { + if (!data.hasData()) { + return null; + } + + Map<DiscoveryNode, StoreFilesMetadata> map = new HashMap<>(); + + data.getData().forEach((discoveryNode, value) -> { + Map<ShardId, NodeStoreFilesMetadata> batch = value.getNodeStoreFilesMetadataBatch(); + NodeStoreFilesMetadata metadata = batch.get(unassignedShard.shardId()); + if (metadata != null) { + map.put(discoveryNode, metadata.storeFilesMetadata()); + } + }); + + return map; + } +} diff --git a/server/src/main/java/org/opensearch/http/HttpInfo.java b/server/src/main/java/org/opensearch/http/HttpInfo.java index 10f2d50dacb14..4a39e40c471b1 100644 --- a/server/src/main/java/org/opensearch/http/HttpInfo.java +++ b/server/src/main/java/org/opensearch/http/HttpInfo.java @@ -32,6 +32,7 @@ package org.opensearch.http; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +48,9 @@ /** * Information
diff --git a/server/src/main/java/org/opensearch/http/HttpInfo.java b/server/src/main/java/org/opensearch/http/HttpInfo.java
index 10f2d50dacb14..4a39e40c471b1 100644
--- a/server/src/main/java/org/opensearch/http/HttpInfo.java
+++ b/server/src/main/java/org/opensearch/http/HttpInfo.java
@@ -32,6 +32,7 @@
 package org.opensearch.http;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.network.InetAddresses;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -47,8 +48,9 @@
 /**
  * Information about an http connection
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class HttpInfo implements ReportingService.Info {
 
     private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(HttpInfo.class);
diff --git a/server/src/main/java/org/opensearch/http/HttpServerTransport.java b/server/src/main/java/org/opensearch/http/HttpServerTransport.java
index 890136cb67e60..012b69c29c1d4 100644
--- a/server/src/main/java/org/opensearch/http/HttpServerTransport.java
+++ b/server/src/main/java/org/opensearch/http/HttpServerTransport.java
@@ -32,6 +32,7 @@
 package org.opensearch.http;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.lifecycle.LifecycleComponent;
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.core.common.transport.BoundTransportAddress;
@@ -42,8 +43,9 @@
 /**
  * HTTP Transport server
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public interface HttpServerTransport extends LifecycleComponent, ReportingService<HttpInfo> {
 
     String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker";
diff --git a/server/src/main/java/org/opensearch/http/HttpStats.java b/server/src/main/java/org/opensearch/http/HttpStats.java
index 078b84b7bc563..f69eff59e830d 100644
--- a/server/src/main/java/org/opensearch/http/HttpStats.java
+++ b/server/src/main/java/org/opensearch/http/HttpStats.java
@@ -32,6 +32,7 @@
 package org.opensearch.http;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -43,8 +44,9 @@
 /**
  * Stats for HTTP connections
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class HttpStats implements Writeable, ToXContentFragment {
 
     private final long serverOpen;
diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java
index 6ac10a221d49e..3c4cb4fd596c1 100644
--- a/server/src/main/java/org/opensearch/index/IndexModule.java
+++ b/server/src/main/java/org/opensearch/index/IndexModule.java
@@ -79,6 +79,7 @@
 import org.opensearch.index.store.remote.filecache.FileCache;
 import org.opensearch.index.translog.TranslogFactory;
 import org.opensearch.indices.IndicesQueryCache;
+import org.opensearch.indices.RemoteStoreSettings;
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.mapper.MapperRegistry;
 import org.opensearch.indices.recovery.RecoverySettings;
@@ -604,8 +605,8 @@ public IndexService newIndexService(
         IndexStorePlugin.DirectoryFactory remoteDirectoryFactory,
         BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier,
         Supplier<TimeValue> clusterDefaultRefreshIntervalSupplier,
-        Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier,
-        RecoverySettings recoverySettings
+        RecoverySettings recoverySettings,
+        RemoteStoreSettings remoteStoreSettings
     ) throws IOException {
         final IndexEventListener eventListener = freeze();
         Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper
@@ -663,8 +664,8 @@
             recoveryStateFactory,
             translogFactorySupplier,
             clusterDefaultRefreshIntervalSupplier,
-            clusterRemoteTranslogBufferIntervalSupplier,
-            recoverySettings
+            recoverySettings,
+            remoteStoreSettings
         );
         success = true;
         return indexService;
diff --git a/server/src/main/java/org/opensearch/index/IndexService.java
b/server/src/main/java/org/opensearch/index/IndexService.java index 0909e2d5c8ff0..a7b29314210df 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; @@ -90,16 +91,20 @@ import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.similarity.SimilarityService; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -180,8 +185,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction translogFactorySupplier; private final Supplier clusterDefaultRefreshIntervalSupplier; - private final Supplier clusterRemoteTranslogBufferIntervalSupplier; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; public IndexService( IndexSettings indexSettings, @@ -216,8 +221,8 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -293,8 +298,8 @@ public IndexService( this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; - this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; updateFsyncTaskIfNecessary(); } @@ -455,7 +460,11 @@ public synchronized IndexShard createShard( final Consumer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final SegmentReplicationCheckpointPublisher checkpointPublisher, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + 
final RepositoriesService repositoriesService, + final DiscoveryNode targetNode, + @Nullable DiscoveryNode sourceNode, + DiscoveryNodes discoveryNodes ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -484,10 +493,27 @@ public synchronized IndexShard createShard( warmer.warm(reader, shard, IndexService.this.indexSettings); } }; - Store remoteStore = null; - if (this.indexSettings.isRemoteStoreEnabled()) { - Directory remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path); + boolean seedRemote = false; + if (targetNode.isRemoteStoreNode()) { + final Directory remoteDirectory; + if (this.indexSettings.isRemoteStoreEnabled()) { + remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path); + } else { + if (sourceNode != null && sourceNode.isRemoteStoreNode() == false) { + if (routing.primary() == false) { + throw new IllegalStateException("Can't migrate a remote shard to replica before primary " + routing.shardId()); + } + logger.info("DocRep shard {} is migrating to remote", shardId); + seedRemote = true; + } + remoteDirectory = ((RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory).newDirectory( + RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), + this.indexSettings.getUUID(), + shardId, + this.indexSettings.getRemoteStorePathStrategy() + ); + } remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); } @@ -523,12 +549,14 @@ public synchronized IndexShard createShard( retentionLeaseSyncer, circuitBreakerService, translogFactorySupplier, - this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null, + this.indexSettings.isSegRepEnabledOrRemoteNode() ? checkpointPublisher : null, remoteStore, remoteStoreStatsTrackerFactory, - clusterRemoteTranslogBufferIntervalSupplier, nodeEnv.nodeId(), - recoverySettings + recoverySettings, + remoteStoreSettings, + seedRemote, + discoveryNodes ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 5aaea2c498701..82875564c1c07 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -48,16 +48,21 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.search.pipeline.SearchPipelineService; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -758,6 +763,7 @@ public static IndexMergePolicy fromString(String text) { private volatile String defaultSearchPipeline; private final boolean widenIndexSortType; + private final boolean 
assignedOnRemoteNode;
 
     /**
      * The maximum age of a retention lease before it is considered expired.
@@ -981,6 +987,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
          * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them.
          */
         widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0);
+        assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings());
 
         setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING));
         setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING));
@@ -1221,17 +1228,16 @@ public int getNumberOfReplicas() {
 
     /**
      * Returns true if segment replication is enabled on the index.
+     *
+     * During migration, every shard on a remote node also has segment replication
+     * enabled, even without the index setting being present.
      */
-    public boolean isSegRepEnabled() {
-        return ReplicationType.SEGMENT.equals(replicationType);
+    public boolean isSegRepEnabledOrRemoteNode() {
+        return ReplicationType.SEGMENT.equals(replicationType) || isAssignedOnRemoteNode();
     }
 
     public boolean isSegRepLocalEnabled() {
-        return isSegRepEnabled() && !isRemoteStoreEnabled();
-    }
-
-    public boolean isSegRepWithRemoteEnabled() {
-        return isSegRepEnabled() && isRemoteStoreEnabled();
+        return ReplicationType.SEGMENT.equals(replicationType) && !isRemoteStoreEnabled();
     }
 
     /**
@@ -1241,6 +1247,10 @@ public boolean isRemoteStoreEnabled() {
         return isRemoteStoreEnabled;
     }
 
+    public boolean isAssignedOnRemoteNode() {
+        return assignedOnRemoteNode;
+    }
+
     /**
      * Returns if remote translog store is enabled for this index.
      */
@@ -1897,4 +1907,16 @@ public double getDocIdFuzzySetFalsePositiveProbability() {
     public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePositiveProbability) {
         this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability;
     }
+
+    public RemoteStorePathStrategy getRemoteStorePathStrategy() {
+        Map<String, String> remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
+        if (remoteCustomData != null
+            && remoteCustomData.containsKey(PathType.NAME)
+            && remoteCustomData.containsKey(PathHashAlgorithm.NAME)) {
+            PathType pathType = PathType.parseString(remoteCustomData.get(PathType.NAME));
+            PathHashAlgorithm pathHashAlgorithm = PathHashAlgorithm.parseString(remoteCustomData.get(PathHashAlgorithm.NAME));
+            return new RemoteStorePathStrategy(pathType, pathHashAlgorithm);
+        }
+        return new RemoteStorePathStrategy(PathType.FIXED);
+    }
 }
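To make the custom-data handshake above concrete, a small hedged sketch of how getRemoteStorePathStrategy() interprets the index metadata custom data; the map literals are invented for illustration.

// Illustrative only: mirrors what index creation would store under
// IndexMetadata.REMOTE_STORE_CUSTOM_KEY (keys are PathType.NAME / PathHashAlgorithm.NAME).
Map<String, String> remoteCustomData = Map.of(
    "path_type", "HASHED_PREFIX",
    "path_hash_algorithm", "FNV_1A"
);
// With both keys present, the method resolves to:
RemoteStorePathStrategy resolved = new RemoteStorePathStrategy(
    PathType.parseString(remoteCustomData.get("path_type")),                   // PathType.HASHED_PREFIX
    PathHashAlgorithm.parseString(remoteCustomData.get("path_hash_algorithm")) // PathHashAlgorithm.FNV_1A
);
// With either key missing (or no custom data at all), it falls back to:
RemoteStorePathStrategy fallback = new RemoteStorePathStrategy(PathType.FIXED);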
diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java
index ce38dd3bb236c..297fe093f7f4e 100644
--- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java
+++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java
@@ -145,7 +145,9 @@ public void isSegrepLimitBreached(ShardId shardId) {
         final IndexService indexService = indicesService.indexService(shardId.getIndex());
         if (indexService != null) {
             final IndexShard shard = indexService.getShard(shardId.id());
-            if (isSegmentReplicationBackpressureEnabled && shard.indexSettings().isSegRepEnabled() && shard.routingEntry().primary()) {
+            if (isSegmentReplicationBackpressureEnabled
+                && shard.indexSettings().isSegRepEnabledOrRemoteNode()
+                && shard.routingEntry().primary()) {
                 validateReplicationGroup(shard);
             }
         }
@@ -264,7 +266,8 @@ protected void runInternal() {
                     stats.getShardStats().get(shardId).getReplicaStats()
                 );
                 final IndexService indexService = pressureService.indicesService.indexService(shardId.getIndex());
-                if (indexService.getIndexSettings() != null && indexService.getIndexSettings().isSegRepEnabled() == false) {
+                if (indexService.getIndexSettings() != null
+                    && indexService.getIndexSettings().isSegRepEnabledOrRemoteNode() == false) {
                     return;
                 }
                 final IndexShard primaryShard = indexService.getShard(shardId.getId());
diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java
index f5fc8aa1c1eea..e48a76c438057 100644
--- a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java
+++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java
@@ -45,7 +45,7 @@ public SegmentReplicationStats getStats() {
         Map<ShardId, SegmentReplicationPerGroupStats> stats = new HashMap<>();
         for (IndexService indexService : indicesService) {
             for (IndexShard indexShard : indexService) {
-                if (indexShard.indexSettings().isSegRepEnabled() && indexShard.routingEntry().primary()) {
+                if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.routingEntry().primary()) {
                     stats.putIfAbsent(indexShard.shardId(), getStatsForShard(indexShard));
                 }
             }
diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java
index bd4936aeec366..392a925c21143 100644
--- a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java
+++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java
@@ -39,7 +39,7 @@ class LongArrayBackedBitSet implements Accountable, Closeable {
     /**
      * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer.
      * @param in IndexInput containing the serialized bitset.
-     * @throws IOException
+     * @throws IOException I/O exception
     */
     LongArrayBackedBitSet(IndexInput in) throws IOException {
         underlyingArrayLength = in.readLong();
diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
index bf3e10d684c94..8106b65bddeec 100644
--- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
+++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
@@ -244,7 +244,7 @@ private static void doValidateCodecSettings(final String codec) {
      * Creates a new {@link org.opensearch.index.engine.EngineConfig}
      */
     private EngineConfig(Builder builder) {
-        if (builder.isReadOnlyReplica && builder.indexSettings.isSegRepEnabled() == false) {
+        if (builder.isReadOnlyReplica && builder.indexSettings.isSegRepEnabledOrRemoteNode() == false) {
             throw new IllegalArgumentException("Shard can only be wired as a read only replica with Segment Replication enabled");
         }
         this.shardId = builder.shardId;
@@ -491,7 +491,7 @@ public LongSupplier getPrimaryTermSupplier() {
     * @return true if this engine should be wired as read only.
*/ public boolean isReadOnlyReplica() { - return indexSettings.isSegRepEnabled() && isReadOnlyReplica; + return indexSettings.isSegRepEnabledOrRemoteNode() && isReadOnlyReplica; } /** diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index a25ec95f58e05..7bacec22fc850 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -710,7 +710,7 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) final OpVsLuceneDocStatus status; VersionValue versionValue = getVersionFromMap(op.uid().bytes()); assert incrementVersionLookup(); - boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode(); if (versionValue != null) { status = compareOpToVersionMapOnSeqNo(op.id(), op.seqNo(), op.primaryTerm(), versionValue); } else { @@ -1005,7 +1005,7 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : index.seqNo() + ">=" + maxSeqNoOfUpdatesOrDeletes; plan = IndexingStrategy.optimizedAppendOnly(index.version(), 0); } else { - boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode(); versionMap.enforceSafeAccess(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { @@ -1452,7 +1452,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws // See testRecoveryWithOutOfOrderDelete for an example of peer recovery plan = DeletionStrategy.processButSkipLucene(false, delete.version()); } else { - boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { if (segRepEnabled) { @@ -1868,7 +1868,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { // only after the active reader is updated. This ensures that a flush does not wipe out a required commit point file // while we are // in refresh listeners. - final GatedCloseable latestCommit = engineConfig.getIndexSettings().isSegRepEnabled() + final GatedCloseable latestCommit = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode() ? acquireLastIndexCommit(false) : null; commitIndexWriter(indexWriter, translogManager.getTranslogUUID()); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index ed8dba2f8902d..d759423ce5a55 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -436,7 +436,8 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { This is not required for remote store implementations given on failover the replica re-syncs with the store during promotion. 
 */
-        if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false) {
+        if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false
+            && engineConfig.getIndexSettings().isAssignedOnRemoteNode() == false) {
             latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT;
             latestSegmentInfos.changed();
         }
diff --git a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java
new file mode 100644
index 0000000000000..f4730c70362d1
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java
@@ -0,0 +1,191 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.opensearch.OpenSearchParseException;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.regex.Regex;
+import org.opensearch.index.fielddata.IndexFieldData;
+import org.opensearch.index.fielddata.plain.ConstantIndexFieldData;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+import org.opensearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * A field mapper for the constant_keyword field type
+ *
+ * @opensearch.api
+ */
+@PublicApi(since = "2.14.0")
+public class ConstantKeywordFieldMapper extends ParametrizedFieldMapper {
+
+    public static final String CONTENT_TYPE = "constant_keyword";
+
+    private static final String valuePropertyName = "value";
+
+    /**
+     * A {@link Mapper.TypeParser} for the constant keyword field.
+     *
+     * @opensearch.internal
+     */
+    public static class TypeParser implements Mapper.TypeParser {
+        @Override
+        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+            if (!node.containsKey(valuePropertyName)) {
+                throw new OpenSearchParseException("Field [" + name + "] is missing required parameter [value]");
+            }
+            Object value = node.remove(valuePropertyName);
+            if (!(value instanceof String)) {
+                throw new OpenSearchParseException("Field [" + name + "] is expected to be a string value");
+            }
+            return new Builder(name, (String) value);
+        }
+    }
+
+    private static ConstantKeywordFieldMapper toType(FieldMapper in) {
+        return (ConstantKeywordFieldMapper) in;
+    }
+
+    /**
+     * Builder for the constant keyword field mapper
+     *
+     * @opensearch.internal
+     */
+    public static class Builder extends ParametrizedFieldMapper.Builder {
+
+        private final Parameter<String> value;
+
+        public Builder(String name, String value) {
+            super(name);
+            this.value = Parameter.stringParam(valuePropertyName, false, m -> toType(m).value, value);
+        }
+
+        @Override
+        public List<Parameter<?>> getParameters() {
+            return Arrays.asList(value);
+        }
+
+        @Override
+        public ConstantKeywordFieldMapper build(BuilderContext context) {
+            return new ConstantKeywordFieldMapper(
+                name,
+                new ConstantKeywordFieldMapper.ConstantKeywordFieldType(buildFullName(context), value.getValue()),
+                multiFieldsBuilder.build(this, context),
+                copyTo.build(),
+                this
+            );
+        }
+    }
+
+    /**
+     * Field type for the constant keyword field mapper
+     *
+     * @opensearch.api
+     */
+    @PublicApi(since = "2.14.0")
+    protected static final class ConstantKeywordFieldType extends ConstantFieldType {
+
+        protected final String value;
+
+        public ConstantKeywordFieldType(String name, String value) {
+            super(name, Collections.emptyMap());
+            this.value = value;
+        }
+
+        @Override
+        public String typeName() {
+            return CONTENT_TYPE;
+        }
+
+        @Override
+        protected boolean matches(String pattern, boolean caseInsensitive, QueryShardContext context) {
+            return Regex.simpleMatch(pattern, value, caseInsensitive);
+        }
+
+        @Override
+        public Query existsQuery(QueryShardContext context) {
+            return new MatchAllDocsQuery();
+        }
+
+        @Override
+        public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier<SearchLookup> searchLookup) {
+            return new ConstantIndexFieldData.Builder(fullyQualifiedIndexName, name(), CoreValuesSourceType.BYTES);
+        }
+
+        @Override
+        public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) {
+            if (format != null) {
+                throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats.");
+            }
+
+            return new SourceValueFetcher(name(), context) {
+                @Override
+                protected Object parseSourceValue(Object value) {
+                    String keywordValue = value.toString();
+                    return Collections.singletonList(keywordValue);
+                }
+            };
+        }
+    }
+
+    private final String value;
+
+    protected ConstantKeywordFieldMapper(
+        String simpleName,
+        MappedFieldType mappedFieldType,
+        MultiFields multiFields,
+        CopyTo copyTo,
+        ConstantKeywordFieldMapper.Builder builder
+    ) {
+        super(simpleName, mappedFieldType, multiFields, copyTo);
+        this.value = builder.value.getValue();
+    }
+
+    public ParametrizedFieldMapper.Builder getMergeBuilder() {
+        return new ConstantKeywordFieldMapper.Builder(simpleName(), this.value).init(this);
+    }
+
+    @Override
+    protected void parseCreateField(ParseContext context) throws IOException {
+        final String value;
+        if (context.externalValueSet()) {
+            value = context.externalValue().toString();
+        } else {
+            value = context.parser().textOrNull();
+        }
+        if (value == null) {
+            throw new IllegalArgumentException("constant keyword field [" + name() + "] must have a value");
+        }
+        if (!value.equals(fieldType().value)) {
+            throw new IllegalArgumentException("constant keyword field [" + name() + "] must have a value of [" + this.value + "]");
+        }
+    }
+
+    @Override
+    public ConstantKeywordFieldMapper.ConstantKeywordFieldType fieldType() {
+        return (ConstantKeywordFieldMapper.ConstantKeywordFieldType) super.fieldType();
+    }
+
+    @Override
+    protected String contentType() {
+        return CONTENT_TYPE;
+    }
+}
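As a quick illustration of the parse-time and index-time contracts implemented by this mapper; the field name, mapped value, and the parserContext variable are assumptions for the sketch.

// Hedged sketch of the TypeParser contract (parserContext assumed in scope):
Map<String, Object> node = new HashMap<>(Map.of("value", "us-east-1"));
Mapper.Builder builder = new ConstantKeywordFieldMapper.TypeParser().parse("datacenter", node, parserContext);
// A mapping without "value" instead fails with:
//   OpenSearchParseException: Field [datacenter] is missing required parameter [value]
// At index time, parseCreateField accepts {"datacenter": "us-east-1"} and rejects any
// other (or null) value with: constant keyword field [datacenter] must have a value of [us-east-1]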
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java
new file mode 100644
index 0000000000000..10b5c4a0f7157
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java
@@ -0,0 +1,156 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.InetAddressPoint;
+import org.apache.lucene.document.KeywordField;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.opensearch.Version;
+import org.opensearch.common.Booleans;
+import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.network.InetAddresses;
+
+import java.net.InetAddress;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Contains logic to get the FieldMapper for a given type of derived field. Also, for a given type of derived field,
+ * it is used to create an IndexableField for the provided type and object. It is useful when indexing into
+ * lucene MemoryIndex in {@link org.opensearch.index.query.DerivedFieldQuery}.
+ */
+enum DerivedFieldSupportedTypes {
+
+    BOOLEAN("boolean", (name, context) -> {
+        BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name);
+        return builder.build(context);
+    }, name -> o -> {
+        // Trying to mimic the logic for parsing source value as used in BooleanFieldMapper valueFetcher
+        Boolean value;
+        if (o instanceof Boolean) {
+            value = (Boolean) o;
+        } else {
+            String textValue = o.toString();
+            value = Booleans.parseBooleanStrict(textValue, false);
+        }
+        return new Field(name, value ? "T" : "F", BooleanFieldMapper.Defaults.FIELD_TYPE);
+    }),
+    DATE("date", (name, context) -> {
+        // TODO: should we support mapping settings exposed by a given field type from derived fields too?
+        // for example, support `format` for date type?
+        DateFieldMapper.Builder builder = new DateFieldMapper.Builder(
+            name,
+            DateFieldMapper.Resolution.MILLISECONDS,
+            DateFieldMapper.getDefaultDateTimeFormatter(),
+            false,
+            Version.CURRENT
+        );
+        return builder.build(context);
+    }, name -> o -> new LongPoint(name, (long) o)),
+    GEO_POINT("geo_point", (name, context) -> {
+        GeoPointFieldMapper.Builder builder = new GeoPointFieldMapper.Builder(name);
+        return builder.build(context);
+    }, name -> o -> {
+        // convert o to an array of doubles
+        if (!(o instanceof List) || ((List<?>) o).size() != 2 || !(((List<?>) o).get(0) instanceof Double)) {
+            throw new ClassCastException("geo_point should be in format emit(double lat, double lon) for derived fields");
+        }
+        return new LatLonPoint(name, (Double) ((List<?>) o).get(0), (Double) ((List<?>) o).get(1));
+    }),
+    IP("ip", (name, context) -> {
+        IpFieldMapper.Builder builder = new IpFieldMapper.Builder(name, false, Version.CURRENT);
+        return builder.build(context);
+    }, name -> o -> {
+        InetAddress address;
+        if (o instanceof InetAddress) {
+            address = (InetAddress) o;
+        } else {
+            address = InetAddresses.forString(o.toString());
+        }
+        return new InetAddressPoint(name, address);
+    }),
+    KEYWORD("keyword", (name, context) -> {
+        FieldType dummyFieldType = new FieldType();
+        dummyFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+        KeywordFieldMapper.Builder keywordBuilder = new KeywordFieldMapper.Builder(name);
+        KeywordFieldMapper.KeywordFieldType keywordFieldType = keywordBuilder.buildFieldType(context, dummyFieldType);
+        keywordFieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
+        return new KeywordFieldMapper(
+            name,
+            dummyFieldType,
+            keywordFieldType,
+            keywordBuilder.multiFieldsBuilder.build(keywordBuilder, context),
+            keywordBuilder.copyTo.build(),
+            keywordBuilder
+        );
+    }, name -> o -> new KeywordField(name, (String) o, Field.Store.NO)),
+    LONG("long", (name, context) -> {
+        NumberFieldMapper.Builder longBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.LONG, false, false);
+        return longBuilder.build(context);
+    }, name -> o -> new LongField(name, Long.parseLong(o.toString()), Field.Store.NO)),
+    DOUBLE("double", (name, context) -> {
+        NumberFieldMapper.Builder doubleBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.DOUBLE, false, false);
+        return doubleBuilder.build(context);
+    }, name -> o -> new DoubleField(name, Double.parseDouble(o.toString()), Field.Store.NO));
+
+    final String name;
+    private final BiFunction<String, Mapper.BuilderContext, FieldMapper> builder;
+
+    private final Function<String, Function<Object, IndexableField>> indexableFieldBuilder;
+
+    DerivedFieldSupportedTypes(
+        String name,
+        BiFunction<String, Mapper.BuilderContext, FieldMapper> builder,
+        Function<String, Function<Object, IndexableField>> indexableFieldBuilder
+    ) {
+        this.name = name;
+        this.builder = builder;
+        this.indexableFieldBuilder = indexableFieldBuilder;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    private FieldMapper getFieldMapper(String name, Mapper.BuilderContext context) {
+        return builder.apply(name, context);
+    }
+
+    private Function<Object, IndexableField> getIndexableFieldGenerator(String name) {
+        return indexableFieldBuilder.apply(name);
+    }
+
+    private static final Map<String, DerivedFieldSupportedTypes> enumMap = Arrays.stream(DerivedFieldSupportedTypes.values())
+        .collect(Collectors.toMap(DerivedFieldSupportedTypes::getName, enumValue -> enumValue));
+
+    public static FieldMapper getFieldMapperFromType(String type, String name, Mapper.BuilderContext context) {
+        if (!enumMap.containsKey(type)) {
+            throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context.");
+        }
+        return enumMap.get(type).getFieldMapper(name, context);
+    }
+
+    public static Function<Object, IndexableField> getIndexableFieldGeneratorType(String type, String name) {
+        if (!enumMap.containsKey(type)) {
+            throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context.");
+        }
+        return enumMap.get(type).getIndexableFieldGenerator(name);
+    }
+}
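For a sense of how the enum above is consumed, a hedged sketch follows; the field names and values are invented, and since the enum is package-private the callers live in org.opensearch.index.mapper.

// Illustrative only: map an emitted script value to the Lucene field that will
// be indexed into the per-document MemoryIndex by DerivedFieldQuery.
Function<Object, IndexableField> keywordGen = DerivedFieldSupportedTypes.getIndexableFieldGeneratorType("keyword", "request_id");
IndexableField keywordField = keywordGen.apply("abc-123"); // -> KeywordField("request_id", "abc-123", Store.NO)

Function<Object, IndexableField> ipGen = DerivedFieldSupportedTypes.getIndexableFieldGeneratorType("ip", "client_ip");
IndexableField ipField = ipGen.apply("10.1.2.3"); // parsed via InetAddresses.forString -> InetAddressPoint
// An unsupported type fails fast:
// getIndexableFieldGeneratorType("text", "f") -> IllegalArgumentException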
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java
new file mode 100644
index 0000000000000..abdca7879cc94
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java
@@ -0,0 +1,363 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper;
+import org.apache.lucene.queries.spans.SpanQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.opensearch.common.Nullable;
+import org.opensearch.common.geo.ShapeRelation;
+import org.opensearch.common.time.DateMathParser;
+import org.opensearch.common.unit.Fuzziness;
+import org.opensearch.index.query.DerivedFieldQuery;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.script.DerivedFieldScript;
+import org.opensearch.script.Script;
+import org.opensearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.time.ZoneId;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+
+/**
+ * MappedFieldType for Derived Fields
+ * Contains logic to execute different types of queries on a derived field of a given type.
+ * @opensearch.internal + */ +public final class DerivedFieldType extends MappedFieldType { + private final String type; + + private final Script script; + + FieldMapper typeFieldMapper; + + final Function indexableFieldGenerator; + + public DerivedFieldType( + String name, + String type, + Script script, + boolean isIndexed, + boolean isStored, + boolean hasDocValues, + Map meta, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + super(name, isIndexed, isStored, hasDocValues, typeFieldMapper.fieldType().getTextSearchInfo(), meta); + this.type = type; + this.script = script; + this.typeFieldMapper = typeFieldMapper; + this.indexableFieldGenerator = fieldFunction; + } + + public DerivedFieldType( + String name, + String type, + Script script, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + this(name, type, script, false, false, false, Collections.emptyMap(), typeFieldMapper, fieldFunction); + } + + @Override + public String typeName() { + return "derived"; + } + + public String getType() { + return type; + } + + @Override + public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQueryCaseInsensitive(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termsQuery(List values, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termsQuery(values, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + ShapeRelation relation, + ZoneId timeZone, + DateMathParser parser, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.rangeQuery( + lowerTerm, + upperTerm, + includeLower, + includeUpper, + relation, + timeZone, + parser, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean 
transpositions, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + method, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.prefixQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query wildcardQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.wildcardQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.normalizedWildcardQuery(value, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phraseQuery(stream, slop, 
enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + Query query = typeFieldMapper.mappedFieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + throw new IllegalArgumentException( + "Can only use span prefix queries on text fields - not on [" + name() + "] which is of type [" + typeName() + "]" + ); + } + + @Override + public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.distanceFeatureQuery(origin, pivot, boost, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support exist queries"); + } + + @Override + public boolean isAggregatable() { + return false; + } + + private DerivedFieldScript.LeafFactory getDerivedFieldLeafFactory(QueryShardContext context) { + if (!context.documentMapper("").sourceMapper().enabled()) { + throw new IllegalArgumentException( + "DerivedFieldQuery error: unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + + context.index().getName() + + "]" + ); + } + DerivedFieldScript.Factory factory = context.compile(script, DerivedFieldScript.CONTEXT); + return factory.newFactory(script.getParams(), context.lookup()); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java new file mode 100644 index 0000000000000..f3bf0c613415a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.opensearch.script.DerivedFieldScript;
+import org.opensearch.search.lookup.SourceLookup;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The value fetcher contains logic to execute the script and fetch its value as a list of objects.
+ * It expects a DerivedFieldScript.LeafFactory as input and sets the contract with the consumer to call
+ * {@link #setNextReader(LeafReaderContext)} whenever a segment is switched.
+ */
+public final class DerivedFieldValueFetcher implements ValueFetcher {
+    private DerivedFieldScript derivedFieldScript;
+    private final DerivedFieldScript.LeafFactory derivedFieldScriptFactory;
+
+    public DerivedFieldValueFetcher(DerivedFieldScript.LeafFactory derivedFieldScriptFactory) {
+        this.derivedFieldScriptFactory = derivedFieldScriptFactory;
+    }
+
+    @Override
+    public List<Object> fetchValues(SourceLookup lookup) {
+        derivedFieldScript.setDocument(lookup.docId());
+        // TODO: remove List.of() when derivedFieldScript.execute() returns a list of objects.
+        return List.of(derivedFieldScript.execute());
+    }
+
+    public void setNextReader(LeafReaderContext context) {
+        try {
+            derivedFieldScript = derivedFieldScriptFactory.newInstance(context);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
index c14b2c92c89c3..42b974734e5e7 100644
--- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
@@ -218,7 +218,7 @@ protected List<Parameter<?>> getParameters() {
         );
     }
 
-    private KeywordFieldType buildFieldType(BuilderContext context, FieldType fieldType) {
+    protected KeywordFieldType buildFieldType(BuilderContext context, FieldType fieldType) {
         NamedAnalyzer normalizer = Lucene.KEYWORD_ANALYZER;
         NamedAnalyzer searchAnalyzer = Lucene.KEYWORD_ANALYZER;
         String normalizerName = this.normalizer.getValue();
diff --git a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java
new file mode 100644
index 0000000000000..8beef0bf46be0
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java
@@ -0,0 +1,146 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.query;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryVisitor;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
+import org.opensearch.index.mapper.DerivedFieldValueFetcher;
+import org.opensearch.search.lookup.LeafSearchLookup;
+import org.opensearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Function;
+
+/**
+ * DerivedFieldQuery used for querying derived fields. It contains the logic to execute an input lucene query against
+ * a derived field. It also accepts a DerivedFieldValueFetcher and a SearchLookup as input.
+ */
+public final class DerivedFieldQuery extends Query {
+    private final Query query;
+    private final DerivedFieldValueFetcher valueFetcher;
+    private final SearchLookup searchLookup;
+    private final Function<Object, IndexableField> indexableFieldGenerator;
+    private final Analyzer indexAnalyzer;
+
+    /**
+     * @param query                   lucene query to be executed against the derived field
+     * @param valueFetcher            DerivedFieldValueFetcher ValueFetcher to fetch the value of a derived field from _source
+     *                                using LeafSearchLookup
+     * @param searchLookup            SearchLookup to get the LeafSearchLookup lookup used by valueFetcher to fetch the _source
+     * @param indexableFieldGenerator used to generate a lucene IndexableField from a given object fetched by valueFetcher
+     *                                to be used in the lucene memory index.
+     */
+    public DerivedFieldQuery(
+        Query query,
+        DerivedFieldValueFetcher valueFetcher,
+        SearchLookup searchLookup,
+        Function<Object, IndexableField> indexableFieldGenerator,
+        Analyzer indexAnalyzer
+    ) {
+        this.query = query;
+        this.valueFetcher = valueFetcher;
+        this.searchLookup = searchLookup;
+        this.indexableFieldGenerator = indexableFieldGenerator;
+        this.indexAnalyzer = indexAnalyzer;
+    }
+
+    @Override
+    public void visit(QueryVisitor visitor) {
+        query.visit(visitor);
+    }
+
+    @Override
+    public Query rewrite(IndexSearcher indexSearcher) throws IOException {
+        Query rewritten = indexSearcher.rewrite(query);
+        if (rewritten == query) {
+            return this;
+        }
+        return new DerivedFieldQuery(rewritten, valueFetcher, searchLookup, indexableFieldGenerator, indexAnalyzer);
+    }
+
+    @Override
+    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+        return new ConstantScoreWeight(this, boost) {
+            @Override
+            public Scorer scorer(LeafReaderContext context) {
+                DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
+                valueFetcher.setNextReader(context);
+                LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(context);
+                TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
+                    @Override
+                    public boolean matches() {
+                        leafSearchLookup.source().setSegmentAndDocument(context, approximation.docID());
+                        List<Object> values = valueFetcher.fetchValues(leafSearchLookup.source());
+                        // TODO: in case of errors from the script, should they be ignored and treated as a missing field
+                        // by using a configurable setting?
+                        MemoryIndex memoryIndex = new MemoryIndex();
+                        for (Object value : values) {
+                            memoryIndex.addField(indexableFieldGenerator.apply(value), indexAnalyzer);
+                        }
+                        float score = memoryIndex.search(query);
+                        return score > 0.0f;
+                    }
+
+                    @Override
+                    public float matchCost() {
+                        // TODO: how can we compute this?
+                        return 1000f;
+                    }
+                };
+                return new ConstantScoreScorer(this, score(), scoreMode, twoPhase);
+            }
+
+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                return false;
+            }
+        };
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (sameClassAs(o) == false) {
+            return false;
+        }
+        DerivedFieldQuery other = (DerivedFieldQuery) o;
+        return Objects.equals(this.query, other.query)
+            && Objects.equals(this.valueFetcher, other.valueFetcher)
+            && Objects.equals(this.searchLookup, other.searchLookup)
+            && Objects.equals(this.indexableFieldGenerator, other.indexableFieldGenerator)
+            && Objects.equals(this.indexAnalyzer, other.indexAnalyzer);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(classHash(), query, valueFetcher, searchLookup, indexableFieldGenerator, indexAnalyzer);
+    }
+
+    @Override
+    public String toString(String f) {
+        return "DerivedFieldQuery (Query: [ " + query.toString(f) + "])";
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java
index 7fd83d5753512..3011a48fbb296 100644
--- a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java
+++ b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java
@@ -201,6 +201,11 @@ private static Query newObjectFieldExistsQuery(QueryShardContext context, String
         BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
         Collection<String> fields = context.simpleMatchToIndexNames(objField + ".*");
         for (String field : fields) {
+            int dotPos = field.lastIndexOf('.');
+            if (dotPos > 0 && field.charAt(dotPos + 1) == '_') {
+                // This is a subfield (e.g. prefix) of a complex field type. Skip it.
+                continue;
+            }
             Query existsQuery = context.getMapperService().fieldType(field).existsQuery(context);
             booleanQuery.add(existsQuery, Occur.SHOULD);
         }
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
new file mode 100644
index 0000000000000..4e557d8c24431
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
@@ -0,0 +1,187 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.remote;
+
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.hash.FNV1a;
+import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput;
+
+import java.util.Locale;
+import java.util.Set;
+
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
+
+/**
+ * This class contains the different enums related to remote store, like data categories and types, path types
+ * and hashing algorithms.
+ *
+ * @opensearch.api
+ */
+public class RemoteStoreEnums {
+
+    /**
+     * Categories of the data in Remote store.
+     */
+    @PublicApi(since = "2.14.0")
+    public enum DataCategory {
+        SEGMENTS("segments", Set.of(DataType.values())),
+        TRANSLOG("translog", Set.of(DATA, METADATA));
+
+        private final String name;
+        private final Set<DataType> supportedDataTypes;
+
+        DataCategory(String name, Set<DataType> supportedDataTypes) {
+            this.name = name;
+            this.supportedDataTypes = supportedDataTypes;
+        }
+
+        public boolean isSupportedDataType(DataType dataType) {
+            return supportedDataTypes.contains(dataType);
+        }
+
+        public String getName() {
+            return name;
+        }
+    }
+
+    /**
+     * Types of data in remote store.
+     */
+    @PublicApi(since = "2.14.0")
+    public enum DataType {
+        DATA("data"),
+        METADATA("metadata"),
+        LOCK_FILES("lock_files");
+
+        private final String name;
+
+        DataType(String name) {
+            this.name = name;
+        }
+
+        public String getName() {
+            return name;
+        }
+    }
+
+    /**
+     * Enumerates the types of remote store path resolution techniques supported by OpenSearch.
+     * For more information, see GitHub issue #12567.
+     */
+    @PublicApi(since = "2.14.0")
+    public enum PathType {
+        FIXED {
+            @Override
+            public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
+                // Hash algorithm is not used in FIXED path type
+                return pathInput.basePath()
+                    .add(pathInput.indexUUID())
+                    .add(pathInput.shardId())
+                    .add(pathInput.dataCategory().getName())
+                    .add(pathInput.dataType().getName());
+            }
+
+            @Override
+            boolean requiresHashAlgorithm() {
+                return false;
+            }
+        },
+        HASHED_PREFIX {
+            @Override
+            public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
+                // TODO - We need to implement this; keeping the same path as FIXED for the sake of multiple tests that can fail otherwise.
+                // throw new UnsupportedOperationException("Not implemented"); --> Not using this, to unblock a couple of tests.
+                return pathInput.basePath()
+                    .add(pathInput.indexUUID())
+                    .add(pathInput.shardId())
+                    .add(pathInput.dataCategory().getName())
+                    .add(pathInput.dataType().getName());
+            }
+
+            @Override
+            boolean requiresHashAlgorithm() {
+                return true;
+            }
+        };
+
+        /**
+         * This method generates the path for the given path input, which constitutes multiple fields and characteristics
+         * of the data.
+         *
+         * @param pathInput     input.
+         * @param hashAlgorithm hashing algorithm.
+         * @return the blob path for the path input.
+         */
+        public BlobPath path(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
+            DataCategory dataCategory = pathInput.dataCategory();
+            DataType dataType = pathInput.dataType();
+            assert dataCategory.isSupportedDataType(dataType) : "category:"
+                + dataCategory
+                + " type:"
+                + dataType
+                + " are not supported together";
+            return generatePath(pathInput, hashAlgorithm);
+        }
+
+        abstract BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm);
+
+        abstract boolean requiresHashAlgorithm();
+
+        public static PathType parseString(String pathType) {
+            try {
+                return PathType.valueOf(pathType.toUpperCase(Locale.ROOT));
+            } catch (IllegalArgumentException | NullPointerException e) {
+                // IllegalArgumentException is thrown when the input does not match any enum name
+                // NullPointerException is thrown when the input is null
+                throw new IllegalArgumentException("Could not parse PathType for [" + pathType + "]");
+            }
+        }
+
+        /**
+         * This string is used as a key for storing information in the custom data in index settings.
+         */
+        public static final String NAME = "path_type";
+
+    }
+
+    /**
+     * Type of hashes supported for path types that have hashing.
+ */ + @PublicApi(since = "2.14.0") + public enum PathHashAlgorithm { + + FNV_1A { + @Override + long hash(PathInput pathInput) { + String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() + .getName(); + return FNV1a.hash32(input); + } + }; + + abstract long hash(PathInput pathInput); + + public static PathHashAlgorithm parseString(String pathHashAlgorithm) { + try { + return PathHashAlgorithm.valueOf(pathHashAlgorithm.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null + throw new IllegalArgumentException("Could not parse PathHashAlgorithm for [" + pathHashAlgorithm + "]"); + } + } + + /** + * This string is used as key for storing information in the custom data in index settings. + */ + public static final String NAME = "path_hash_algorithm"; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java deleted file mode 100644 index 6e8126fcce0ca..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.indices.IndicesService; - -/** - * Determines the {@link RemoteStorePathType} at the time of index metadata creation. - * - * @opensearch.internal - */ -public class RemoteStorePathResolver { - - private final ClusterSettings clusterSettings; - - public RemoteStorePathResolver(ClusterSettings clusterSettings) { - this.clusterSettings = clusterSettings; - } - - public RemoteStorePathType resolveType() { - return clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java new file mode 100644 index 0000000000000..ce5a6748fd9d4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; + +import java.util.Objects; + +/** + * This class wraps internal details on the remote store path for an index. 
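`FNV_1A` above delegates to the project's `FNV1a.hash32` helper over the concatenated index UUID, shard id, data category, and data type. For reference, a minimal standalone FNV-1a (32-bit) over a UTF-8 string looks like the sketch below; it uses the standard offset basis and prime, though the OpenSearch helper's exact byte handling and return width may differ.

```java
import java.nio.charset.StandardCharsets;

// Minimal sketch of 32-bit FNV-1a over a string key (standard constants:
// offset basis 2166136261, prime 16777619). Returned as an unsigned value in a long.
public final class Fnv1aSketch {
    static long hash32(String input) {
        int hash = 0x811C9DC5;                      // offset basis 2166136261
        for (byte b : input.getBytes(StandardCharsets.UTF_8)) {
            hash ^= (b & 0xFF);                     // xor in the next byte
            hash *= 0x01000193;                     // multiply by the FNV prime 16777619
        }
        return hash & 0xFFFFFFFFL;                  // keep the unsigned 32-bit result
    }

    public static void main(String[] args) {
        // Same key shape as PathHashAlgorithm.FNV_1A: indexUUID + shardId + category + type
        System.out.println(hash32("9ZoCCUYGRkyyR3hbn-3DdQ" + "0" + "translog" + "data"));
    }
}
```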
+ * + * @opensearch.internal + */ +@PublicApi(since = "2.14.0") +public class RemoteStorePathStrategy { + + private final PathType type; + + @Nullable + private final PathHashAlgorithm hashAlgorithm; + + public RemoteStorePathStrategy(PathType type) { + this(type, null); + } + + public RemoteStorePathStrategy(PathType type, PathHashAlgorithm hashAlgorithm) { + assert type.requiresHashAlgorithm() == false || Objects.nonNull(hashAlgorithm); + this.type = Objects.requireNonNull(type); + this.hashAlgorithm = hashAlgorithm; + } + + public PathType getType() { + return type; + } + + public PathHashAlgorithm getHashAlgorithm() { + return hashAlgorithm; + } + + @Override + public String toString() { + return "RemoteStorePathStrategy{" + "type=" + type + ", hashAlgorithm=" + hashAlgorithm + '}'; + } + + public BlobPath generatePath(PathInput pathInput) { + return type.generatePath(pathInput, hashAlgorithm); + } + + /** + * Wrapper class for the input required to generate path for remote store uploads. + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + public static class PathInput { + private final BlobPath basePath; + private final String indexUUID; + private final String shardId; + private final DataCategory dataCategory; + private final DataType dataType; + + public PathInput(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { + this.basePath = Objects.requireNonNull(basePath); + this.indexUUID = Objects.requireNonNull(indexUUID); + this.shardId = Objects.requireNonNull(shardId); + this.dataCategory = Objects.requireNonNull(dataCategory); + this.dataType = Objects.requireNonNull(dataType); + } + + BlobPath basePath() { + return basePath; + } + + String indexUUID() { + return indexUUID; + } + + String shardId() { + return shardId; + } + + DataCategory dataCategory() { + return dataCategory; + } + + DataType dataType() { + return dataType; + } + + /** + * Returns a new builder for {@link PathInput}. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link PathInput}. + * + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + public static class Builder { + private BlobPath basePath; + private String indexUUID; + private String shardId; + private DataCategory dataCategory; + private DataType dataType; + + public Builder basePath(BlobPath basePath) { + this.basePath = basePath; + return this; + } + + public Builder indexUUID(String indexUUID) { + this.indexUUID = indexUUID; + return this; + } + + public Builder shardId(String shardId) { + this.shardId = shardId; + return this; + } + + public Builder dataCategory(DataCategory dataCategory) { + this.dataCategory = dataCategory; + return this; + } + + public Builder dataType(DataType dataType) { + this.dataType = dataType; + return this; + } + + public PathInput build() { + return new PathInput(basePath, indexUUID, shardId, dataCategory, dataType); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java new file mode 100644 index 0000000000000..20fc516132220 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
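A short usage sketch of the API introduced above: build a `PathInput` via its builder and hand it to a `RemoteStorePathStrategy`. The repository base path and index UUID are placeholder values, and `BlobPath.cleanPath()` is assumed to be the usual empty-path factory.

```java
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.index.remote.RemoteStoreEnums;
import org.opensearch.index.remote.RemoteStorePathStrategy;

// Sketch: generate a segment-data path with the FIXED strategy introduced above.
public class PathStrategyExample {
    public static void main(String[] args) {
        RemoteStorePathStrategy strategy = new RemoteStorePathStrategy(RemoteStoreEnums.PathType.FIXED);

        RemoteStorePathStrategy.PathInput input = RemoteStorePathStrategy.PathInput.builder()
            .basePath(BlobPath.cleanPath().add("my-repo-base"))   // placeholder repository base path
            .indexUUID("9ZoCCUYGRkyyR3hbn-3DdQ")                  // placeholder index UUID
            .shardId("0")
            .dataCategory(RemoteStoreEnums.DataCategory.SEGMENTS)
            .dataType(RemoteStoreEnums.DataType.DATA)
            .build();

        // FIXED ignores the hash algorithm and yields <base>/<indexUUID>/<shardId>/segments/data
        BlobPath path = strategy.generatePath(input);
        System.out.println(path.buildAsString());
    }
}
```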
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.IndicesService; + +/** + * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. + * + * @opensearch.internal + */ +public class RemoteStorePathStrategyResolver { + + private volatile PathType type; + + public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings) { + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); + } + + public RemoteStorePathStrategy get() { + return new RemoteStorePathStrategy(type, PathHashAlgorithm.FNV_1A); + } + + private void setType(PathType type) { + this.type = type; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java deleted file mode 100644 index a64e07ab1f66f..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import java.util.Locale; - -/** - * Enumerates the types of remote store paths resolution techniques supported by OpenSearch. - * For more information, see Github issue #12567. - * - * @opensearch.internal - */ -public enum RemoteStorePathType { - - FIXED, - HASHED_PREFIX; - - public static RemoteStorePathType parseString(String remoteStoreBlobPathType) { - try { - return RemoteStorePathType.valueOf(remoteStoreBlobPathType.toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException("Could not parse RemoteStorePathType for [" + remoteStoreBlobPathType + "]"); - } - } - - /** - * This string is used as key for storing information in the custom data in index settings. 
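The resolver above is an instance of the standard OpenSearch dynamic-setting pattern: cache the current value in a `volatile` field and register an update consumer so later cluster-setting changes are observed without a restart. A generic sketch of the pattern, with a hypothetical boolean setting, follows.

```java
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;

// Sketch of the volatile-field + settings-update-consumer pattern used by
// RemoteStorePathStrategyResolver. The setting name here is hypothetical.
public class DynamicSettingHolder {
    public static final Setting<Boolean> MY_FLAG = Setting.boolSetting(
        "cluster.my_plugin.flag",          // hypothetical setting key
        false,
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );

    private volatile boolean flag;

    public DynamicSettingHolder(ClusterSettings clusterSettings) {
        flag = clusterSettings.get(MY_FLAG);                                   // initial value
        clusterSettings.addSettingsUpdateConsumer(MY_FLAG, v -> this.flag = v); // stay current
    }

    public boolean isEnabled() {
        return flag; // reads always see the latest applied cluster setting
    }
}
```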
- */ - public static final String NAME = "path_type"; -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java index 9a146be96c9de..21753e68db498 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java @@ -68,7 +68,7 @@ public RemoteStoreStatsTrackerFactory(ClusterService clusterService, Settings se @Override public void afterIndexShardCreated(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteStoreEnabled() == false) { + if (indexShard.indexSettings().isRemoteStoreEnabled() == false && indexShard.indexSettings().isAssignedOnRemoteNode() == false) { return; } ShardId shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 576e00f8f30d1..bb61e1afa05f4 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -163,6 +163,8 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + private long searchIdleReactivateCount; + @Nullable private RequestStatsLongHolder requestStatsLongHolder; @@ -193,7 +195,8 @@ public Stats( long pitCurrent, long suggestCount, long suggestTimeInMillis, - long suggestCurrent + long suggestCurrent, + long searchIdleReactivateCount ) { this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; @@ -220,6 +223,8 @@ public Stats( this.pitCount = pitCount; this.pitTimeInMillis = pitTimeInMillis; this.pitCurrent = pitCurrent; + + this.searchIdleReactivateCount = searchIdleReactivateCount; } private Stats(StreamInput in) throws IOException { @@ -255,6 +260,10 @@ private Stats(StreamInput in) throws IOException { concurrentQueryCurrent = in.readVLong(); queryConcurrency = in.readVLong(); } + + if (in.getVersion().onOrAfter(Version.V_2_14_0)) { + searchIdleReactivateCount = in.readVLong(); + } } public void add(Stats stats) { @@ -282,6 +291,8 @@ public void add(Stats stats) { pitCount += stats.pitCount; pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; + + searchIdleReactivateCount += stats.searchIdleReactivateCount; } public void addForClosingShard(Stats stats) { @@ -306,6 +317,8 @@ public void addForClosingShard(Stats stats) { pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; queryConcurrency += stats.queryConcurrency; + + searchIdleReactivateCount += stats.searchIdleReactivateCount; } public long getQueryCount() { @@ -412,6 +425,10 @@ public long getSuggestCurrent() { return suggestCurrent; } + public long getSearchIdleReactivateCount() { + return searchIdleReactivateCount; + } + public static Stats readStats(StreamInput in) throws IOException { return new Stats(in); } @@ -457,6 +474,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(concurrentQueryCurrent); out.writeVLong(queryConcurrency); } + + if (out.getVersion().onOrAfter(Version.V_2_14_0)) { + out.writeVLong(searchIdleReactivateCount); + } } @Override @@ -486,6 +507,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, 
Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + builder.field(Fields.SEARCH_IDLE_REACTIVATE_COUNT_TOTAL, searchIdleReactivateCount); + if (requestStatsLongHolder != null) { builder.startObject(Fields.REQUEST); @@ -654,6 +677,7 @@ static final class Fields { static final String TIME = "time"; static final String CURRENT = "current"; static final String TOTAL = "total"; + static final String SEARCH_IDLE_REACTIVATE_COUNT_TOTAL = "search_idle_reactivate_count_total"; } diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 99e3f8465c5db..3098986852cc1 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -213,6 +213,11 @@ public void onFreePitContext(ReaderContext readerContext) { totalStats.pitMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); } + @Override + public void onSearchIdleReactivation() { + totalStats.searchIdleMetric.inc(); + } + /** * Holder of statistics values * @@ -239,6 +244,7 @@ static final class StatsHolder { final CounterMetric scrollCurrent = new CounterMetric(); final CounterMetric pitCurrent = new CounterMetric(); final CounterMetric suggestCurrent = new CounterMetric(); + final CounterMetric searchIdleMetric = new CounterMetric(); SearchStats.Stats stats() { return new SearchStats.Stats( @@ -260,7 +266,8 @@ SearchStats.Stats stats() { pitCurrent.count(), suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), - suggestCurrent.count() + suggestCurrent.count(), + searchIdleMetric.count() ); } } diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index ca1dfe2d5ad01..c6a1f5f27a875 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -135,7 +135,7 @@ protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint() - && indexShard.isRemoteTranslogEnabled() == false) { + && indexShard.indexSettings().isAssignedOnRemoteNode() == false) { indexShard.sync(); } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 7b9c1d3aa548f..599c54f293e89 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -253,6 +253,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private volatile ReplicationCheckpoint latestReplicationCheckpoint; + private final Function isShardOnRemoteEnabledNode; + /** * Get all retention leases tracked on this shard. 
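The `V_2_14_0` checks in the `SearchStats.Stats` stream constructor and `writeTo` above are the standard wire backward-compatibility idiom: a new field is read or written only when the peer's version is known to carry it, so mixed-version clusters keep working during rolling upgrades. Below is a minimal sketch of the idiom for a single hypothetical counter.

```java
import java.io.IOException;

import org.opensearch.Version;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

// Sketch: version-gated serialization of a new counter on an existing Writeable.
// 'myNewCounter' is hypothetical; the guard mirrors the SearchStats.Stats change above.
public class StatsFragment implements Writeable {
    private long myNewCounter;

    public StatsFragment(StreamInput in) throws IOException {
        // ... read pre-existing fields first, in their original order ...
        if (in.getVersion().onOrAfter(Version.V_2_14_0)) {
            myNewCounter = in.readVLong();   // only present on 2.14.0+ peers
        }                                    // else: keep the default (0)
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // ... write pre-existing fields first, in their original order ...
        if (out.getVersion().onOrAfter(Version.V_2_14_0)) {
            out.writeVLong(myNewCounter);    // never send a field an old peer cannot parse
        }
    }
}
```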
* @@ -999,7 +1001,8 @@ public ReplicationTracker( final LongConsumer onGlobalCheckpointUpdated, final LongSupplier currentTimeMillisSupplier, final BiConsumer> onSyncRetentionLeases, - final Supplier safeCommitInfoSupplier + final Supplier safeCommitInfoSupplier, + final Function isShardOnRemoteEnabledNode ) { this( shardId, @@ -1011,7 +1014,8 @@ public ReplicationTracker( currentTimeMillisSupplier, onSyncRetentionLeases, safeCommitInfoSupplier, - x -> {} + x -> {}, + isShardOnRemoteEnabledNode ); } @@ -1037,7 +1041,8 @@ public ReplicationTracker( final LongSupplier currentTimeMillisSupplier, final BiConsumer> onSyncRetentionLeases, final Supplier safeCommitInfoSupplier, - final Consumer onReplicationGroupUpdated + final Consumer onReplicationGroupUpdated, + final Function isShardOnRemoteEnabledNode ) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; @@ -1059,7 +1064,8 @@ public ReplicationTracker( this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; - this.latestReplicationCheckpoint = indexSettings.isSegRepEnabled() ? ReplicationCheckpoint.empty(shardId) : null; + this.latestReplicationCheckpoint = indexSettings.isSegRepEnabledOrRemoteNode() ? ReplicationCheckpoint.empty(shardId) : null; + this.isShardOnRemoteEnabledNode = isShardOnRemoteEnabledNode; assert Version.V_EMPTY.equals(indexSettings.getIndexVersionCreated()) == false; assert invariant(); } @@ -1088,8 +1094,12 @@ private ReplicationGroup calculateReplicationGroup() { } else { newVersion = replicationGroup.getVersion() + 1; } - - assert indexSettings().isRemoteTranslogStoreEnabled() + assert indexSettings.isRemoteTranslogStoreEnabled() + // Handle migration cases. 
Ignore assertion if any of the shard copies in the replication group is assigned to a remote node + || (replicationGroup != null + && replicationGroup.getReplicationTargets() + .stream() + .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId()))) || checkpoints.entrySet().stream().filter(e -> e.getValue().tracked).allMatch(e -> e.getValue().replicated) : "In absence of remote translog store, all tracked shards must have replication mode as LOGICAL_REPLICATION"; @@ -1173,7 +1183,7 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI * @param visibleCheckpoint the visible checkpoint */ public synchronized void updateVisibleCheckpointForShard(final String allocationId, final ReplicationCheckpoint visibleCheckpoint) { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode(); assert primaryMode; assert handoffInProgress == false; assert invariant(); @@ -1217,7 +1227,7 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation * @param checkpoint {@link ReplicationCheckpoint} */ public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint checkpoint) { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode(); if (checkpoint.equals(latestReplicationCheckpoint) == false) { this.latestReplicationCheckpoint = checkpoint; } @@ -1248,7 +1258,9 @@ private void createReplicationLagTimers() { if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false && isPrimaryRelocation(allocationId) == false - && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) { + && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) + && (indexSettings.isSegRepLocalEnabled() == true + || isShardOnRemoteEnabledNode.apply(routingTable.getByAllocationId(allocationId).currentNodeId()))) { cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> new SegmentReplicationLagTimer()); logger.trace( () -> new ParameterizedMessage( @@ -1269,7 +1281,7 @@ && isPrimaryRelocation(allocationId) == false * @param checkpoint {@link ReplicationCheckpoint} */ public synchronized void startReplicationLagTimers(ReplicationCheckpoint checkpoint) { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode(); if (checkpoint.equals(latestReplicationCheckpoint) == false) { this.latestReplicationCheckpoint = checkpoint; } @@ -1294,7 +1306,7 @@ && isPrimaryRelocation(e.getKey()) == false * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group. 
*/ public synchronized Set getSegmentReplicationStats() { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode(); if (primaryMode) { return this.checkpoints.entrySet() .stream() @@ -1367,7 +1379,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) - || indexSettings().isRemoteTranslogStoreEnabled()) { + || indexSettings.isAssignedOnRemoteNode()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; @@ -1453,7 +1465,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, inSync, inSync, - isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + initializingId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, initializingId) + ) ) ); } @@ -1472,7 +1489,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, false, false, - isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + initializingId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, initializingId) + ) ) ); } @@ -1486,7 +1508,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, true, true, - isReplicated(inSyncId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + inSyncId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, inSyncId) + ) ) ); } @@ -1503,6 +1530,12 @@ public synchronized void updateFromClusterManager( assert invariant(); } + private boolean assignedToRemoteStoreNode(IndexShardRoutingTable routingTable, String allocationId) { + return indexSettings().isRemoteStoreEnabled() + || (routingTable.getByAllocationId(allocationId) != null + && isShardOnRemoteEnabledNode.apply(routingTable.getByAllocationId(allocationId).currentNodeId())); + } + /** * Returns whether the requests are replicated considering the remote translog existence, current/primary/primary target allocation ids. * @@ -1511,13 +1544,16 @@ public synchronized void updateFromClusterManager( * @param primaryTargetAllocationId primary target allocation id * @return the replication mode. */ - private boolean isReplicated(String allocationId, String primaryAllocationId, String primaryTargetAllocationId) { - // If remote translog is enabled, then returns replication mode checking current allocation id against the + private boolean isReplicated( + String allocationId, + String primaryAllocationId, + String primaryTargetAllocationId, + boolean assignedToRemoteStoreNode + ) { + // If assigned to a remote node, returns true if given allocation id matches the primary or it's relocation target allocation // primary and primary target allocation id. - // If remote translog is enabled, then returns true if given allocation id matches the primary or it's relocation target allocation - // id. 
- if (indexSettings().isRemoteTranslogStoreEnabled()) { - return (allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId)); + if (assignedToRemoteStoreNode == true) { + return allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId); } // For other case which is local translog, return true as the requests are replicated to all shards in the replication group. return true; diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index 675d60ec2b63d..7f0806059155a 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -43,7 +43,7 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode() - && !shard.indexSettings.isSegRepWithRemoteEnabled()) { + && shard.indexSettings.isAssignedOnRemoteNode() == false) { publisher.publish(shard, shard.getLatestReplicationCheckpoint()); } return true; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 977155a1cbb72..be9a7872cd89c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -67,6 +67,8 @@ import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; @@ -179,6 +181,7 @@ import org.opensearch.index.warmer.WarmerStats; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; @@ -234,6 +237,9 @@ import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_SEEDED; +import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_UNSEEDED; +import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_NON_MIGRATING; import static org.opensearch.index.translog.Translog.Durability; import static org.opensearch.index.translog.Translog.TRANSLOG_UUID_KEY; @@ -346,6 +352,14 @@ Runnable getGlobalCheckpointSyncer() { private final List internalRefreshListener = new ArrayList<>(); private final RemoteStoreFileDownloader fileDownloader; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; + /* + On source doc rep node, It will be DOCREP_NON_MIGRATING. 
+ On source remote node , it will be REMOTE_MIGRATING_SEEDED when relocating from remote node + On source remote node , it will be REMOTE_MIGRATING_UNSEEDED when relocating from docrep node + */ + private final ShardMigrationState shardMigrationState; + private DiscoveryNodes discoveryNodes; public IndexShard( final ShardRouting shardRouting, @@ -372,9 +386,11 @@ public IndexShard( @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final Supplier clusterRemoteTranslogBufferIntervalSupplier, final String nodeId, - final RecoverySettings recoverySettings + final RecoverySettings recoverySettings, + final RemoteStoreSettings remoteStoreSettings, + boolean seedRemote, + final DiscoveryNodes discoveryNodes ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -394,8 +410,8 @@ public IndexShard( logger, threadPool, this::getEngine, - indexSettings.isRemoteTranslogStoreEnabled(), - () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier) + indexSettings.isAssignedOnRemoteNode(), + () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval) ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -436,7 +452,8 @@ public IndexShard( threadPool::absoluteTimeInMillis, (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener), this::getSafeCommitInfo, - pendingReplicationActions + pendingReplicationActions, + isShardOnRemoteEnabledNode ); // the query cache is a node-level thing, however we want the most popular filters @@ -471,7 +488,10 @@ public boolean shouldCache(Query query) { : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); + this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); + this.discoveryNodes = discoveryNodes; } public ThreadPool getThreadPool() { @@ -482,6 +502,37 @@ public Store store() { return this.store; } + public boolean isMigratingToRemote() { + // set it true only if shard is remote, but index setting doesn't say so + return shardMigrationState == REMOTE_MIGRATING_UNSEEDED || shardMigrationState == REMOTE_MIGRATING_SEEDED; + } + + public boolean shouldSeedRemoteStore() { + // set it true only if relocating from docrep to remote store + return shardMigrationState == REMOTE_MIGRATING_UNSEEDED; + } + + /** + * To be delegated to {@link ReplicationTracker} so that relevant remote store based + * operations can be ignored during engine migration + *
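Because the comment above compresses several cases, here is the settings-to-state mapping of `getShardMigrationState` (defined later in this file's diff) restated as a small self-contained sketch; the branch logic mirrors the diff, with the enum constants reduced to strings for brevity.

```java
// Sketch: the settings-to-state mapping implemented by IndexShard.getShardMigrationState.
public class MigrationStateSketch {
    static String migrationState(boolean assignedOnRemoteNode, boolean remoteStoreEnabled, boolean shouldSeed) {
        if (assignedOnRemoteNode && remoteStoreEnabled) {
            return "REMOTE_NON_MIGRATING";           // steady-state remote-backed index
        } else if (assignedOnRemoteNode) {
            // Shard landed on a remote-capable node during migration: it still needs seeding
            // if it came from a docrep node, and is already seeded if it came from a remote node.
            return shouldSeed ? "REMOTE_MIGRATING_UNSEEDED" : "REMOTE_MIGRATING_SEEDED";
        }
        return "DOCREP_NON_MIGRATING";               // plain document-replication shard
    }

    public static void main(String[] args) {
        System.out.println(migrationState(true, false, true));   // REMOTE_MIGRATING_UNSEEDED
    }
}
```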

    + * Has explicit null checks to ensure that the {@link ReplicationTracker#invariant()} + * checks does not fail during a cluster manager state update when the latest replication group + * calculation is not yet done and the cached replication group details are available + */ + public Function isShardOnRemoteEnabledNode = nodeId -> { + DiscoveryNode node = discoveryNodes.get(nodeId); + if (node != null) { + logger.trace("Node {} has remote_enabled as {}", nodeId, node.isRemoteStoreNode()); + return node.isRemoteStoreNode(); + } + return false; + }; + + public boolean isRemoteSeeded() { + return shardMigrationState == REMOTE_MIGRATING_SEEDED; + } + public Store remoteStore() { return this.remoteStore; } @@ -573,6 +624,10 @@ public RecoverySettings getRecoverySettings() { return recoverySettings; } + public RemoteStoreSettings getRemoteStoreSettings() { + return remoteStoreSettings; + } + public RemoteStoreFileDownloader getFileDownloader() { return fileDownloader; } @@ -584,8 +639,10 @@ public void updateShardState( final BiConsumer> primaryReplicaSyncer, final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable + final IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException { + this.discoveryNodes = discoveryNodes; final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; @@ -625,7 +682,7 @@ public void updateShardState( // Flush here after relocation of primary, so that replica get all changes from new primary rather than waiting for more // docs to get indexed. - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { flush(new FlushRequest().waitIfOngoing(true).force(true)); } } else if (currentRouting.primary() @@ -705,7 +762,7 @@ public void updateShardState( + newRouting; assert getOperationPrimaryTerm() == newPrimaryTerm; try { - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. assert newRouting.primary() && currentRouting.primary() == false; ReplicationTimer timer = new ReplicationTimer(); @@ -725,7 +782,7 @@ public void updateShardState( } replicationTracker.activatePrimaryMode(getLocalCheckpoint()); - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { // force publish a checkpoint once in primary mode so that replicas not caught up to previous primary // are brought up to date. checkpointPublisher.publish(this, getLatestReplicationCheckpoint()); @@ -839,8 +896,8 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta * relocated. After all operations are successfully blocked, performSegRep is executed followed by target relocation * handoff. 
* + * @param consumer a {@link Runnable} that is executed after performSegRep * @param performSegRep a {@link Runnable} that is executed after operations are blocked - * @param consumer a {@link Runnable} that is executed after performSegRep * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation * @throws IllegalStateException if the relocation target is no longer part of the replication group * @throws InterruptedException if blocking operations is interrupted @@ -858,7 +915,8 @@ public void relocated( indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { forceRefreshes.close(); - boolean syncTranslog = isRemoteTranslogEnabled() && Durability.ASYNC == indexSettings.getTranslogDurability(); + boolean syncTranslog = (isRemoteTranslogEnabled() || this.isMigratingToRemote()) + && Durability.ASYNC == indexSettings.getTranslogDurability(); // Since all the index permits are acquired at this point, the translog buffer will not change. // It is safe to perform sync of translogs now as this will ensure for remote-backed indexes, the // translogs has been uploaded to the remote store. @@ -881,6 +939,7 @@ public void relocated( : "in-flight operations in progress while moving shard state to relocated"; performSegRep.run(); + /* * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations. @@ -1041,7 +1100,7 @@ private Engine.IndexResult applyIndexOperation( // For Segment Replication enabled replica shards we can be skip parsing the documents as we directly copy segments from primary // shard. - if (indexSettings.isSegRepEnabled() && routingEntry().primary() == false) { + if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary() == false) { Engine.Index index = new Engine.Index( new Term(IdFieldMapper.NAME, Uid.encodeId(id)), new ParsedDocument(null, null, id, null, null, sourceToParse.source(), sourceToParse.getMediaType(), null), @@ -1240,7 +1299,7 @@ public Engine.DeleteResult applyDeleteOperationOnPrimary( } public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long opPrimaryTerm, long version, String id) throws IOException { - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { final Engine.Delete delete = new Engine.Delete( id, new Term(IdFieldMapper.NAME, Uid.encodeId(id)), @@ -1435,12 +1494,12 @@ public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean inclu SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); // Populate remote_store stats only if the index is remote store backed - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings().isAssignedOnRemoteNode()) { segmentsStats.addRemoteSegmentStats( new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats()) ); } - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { segmentsStats.addReplicationStats(getReplicationStats()); } return segmentsStats; @@ -1457,7 +1516,7 @@ public FieldDataStats fieldDataStats(String... 
fields) {
     public TranslogStats translogStats() {
         TranslogStats translogStats = getEngine().translogManager().getTranslogStats();
         // Populate remote_store stats only if the index is remote store backed
-        if (indexSettings.isRemoteStoreEnabled()) {
+        if (indexSettings.isAssignedOnRemoteNode()) {
             translogStats.addRemoteTranslogStats(
                 new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats())
             );
@@ -1496,7 +1555,7 @@ public void flush(FlushRequest request) {
      * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details
      */
     public void trimTranslog() {
-        if (isRemoteTranslogEnabled()) {
+        if (indexSettings.isAssignedOnRemoteNode()) {
             return;
         }
         verifyNotClosed();
@@ -1661,7 +1720,7 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() {
      *
      */
     public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() {
-        assert indexSettings.isSegRepEnabled();
+        assert indexSettings.isSegRepEnabledOrRemoteNode();

         // do not close the snapshot - caller will close it.
         GatedCloseable<SegmentInfos> snapshot = null;
@@ -1720,7 +1779,7 @@ ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) th
      * @return - True if the shard is able to perform segment replication.
      */
     public boolean isSegmentReplicationAllowed() {
-        if (indexSettings.isSegRepEnabled() == false) {
+        if (indexSettings.isSegRepEnabledOrRemoteNode() == false) {
             logger.trace("Attempting to perform segment replication when it is not enabled on the index");
             return false;
         }
@@ -2016,7 +2075,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO
        ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003
      */
     public RemoteSegmentStoreDirectory getRemoteDirectory() {
-        assert indexSettings.isRemoteStoreEnabled();
+        assert indexSettings.isAssignedOnRemoteNode();
         assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory";
         FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory();
         FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate();
@@ -2028,8 +2087,8 @@ public RemoteSegmentStoreDirectory getRemoteDirectory() {
      * Returns true iff it is able to verify that remote segment store
      * is in sync with local
      */
-    boolean isRemoteSegmentStoreInSync() {
-        assert indexSettings.isRemoteStoreEnabled();
+    public boolean isRemoteSegmentStoreInSync() {
+        assert indexSettings.isAssignedOnRemoteNode();
         try {
             RemoteSegmentStoreDirectory directory = getRemoteDirectory();
             if (directory.readLatestMetadataFile() != null) {
@@ -2059,6 +2118,46 @@ boolean isRemoteSegmentStoreInSync() {
         return false;
     }

+    public void waitForRemoteStoreSync() {
+        waitForRemoteStoreSync(() -> {});
+    }
+
+    /*
+        Blocks the calling thread, waiting for the remote store to get synced until the internal remote upload timeout elapses.
+        Invokes onProgress whenever the count of files uploaded to the remote store increases.
+    */
+    public void waitForRemoteStoreSync(Runnable onProgress) {
+        assert indexSettings.isAssignedOnRemoteNode();
+        RemoteSegmentStoreDirectory directory = getRemoteDirectory();
+        int segmentUploadedCount = 0;
+        if (shardRouting.primary() == false) {
+            return;
+        }
+        long startNanos = System.nanoTime();
+
+        while (System.nanoTime() - startNanos < getRecoverySettings().internalRemoteUploadTimeout().nanos()) {
+            try {
+                if (isRemoteSegmentStoreInSync()) {
+                    break;
+                } else {
+                    if (directory.getSegmentsUploadedToRemoteStore().size() > segmentUploadedCount) {
+                        onProgress.run();
+                        logger.debug("Uploaded segment count {}", directory.getSegmentsUploadedToRemoteStore().size());
+                        segmentUploadedCount = directory.getSegmentsUploadedToRemoteStore().size();
+                    }
+                    try {
+                        // Thread.sleep expects milliseconds; TimeValue#seconds() would return 30 and sleep for only 30ms.
+                        Thread.sleep(TimeValue.timeValueSeconds(30).millis());
+                    } catch (InterruptedException ie) {
+                        throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie);
+                    }
+                }
+            } catch (AlreadyClosedException e) {
+                // There is no point in waiting as the shard is now closed.
+                return;
+            }
+        }
+    }
+
     public void preRecovery() {
         final IndexShardState currentState = this.state; // single volatile read
         if (currentState == IndexShardState.CLOSED) {
@@ -2203,7 +2302,7 @@ public long recoverLocallyAndFetchStartSeqNo(boolean localTranslog) {
      * @return the starting sequence number from which the recovery should start.
      */
     private long recoverLocallyUptoLastCommit() {
-        assert isRemoteTranslogEnabled() : "Remote translog store is not enabled";
+        assert indexSettings.isAssignedOnRemoteNode() : "Remote translog store is not enabled";
         long seqNo;
         validateLocalRecoveryState();
@@ -2449,7 +2548,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b
         synchronized (engineMutex) {
             assert currentEngineReference.get() == null : "engine is running";
             verifyNotClosed();
-            if (indexSettings.isRemoteStoreEnabled()) {
+            if (indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded()) {
                 // Download missing segments from remote segment store.
                 if (syncFromRemote) {
                     syncSegmentsFromRemoteSegmentStore(false);
@@ -2488,7 +2587,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b
             onNewEngine(newEngine);
             currentEngineReference.set(newEngine);
-            if (indexSettings.isSegRepEnabled()) {
+            if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                 // set initial replication checkpoints into tracker.
                 updateReplicationCheckpoint();
             }
@@ -2900,7 +2999,7 @@ public Translog.Snapshot getHistoryOperations(String reason, long startingSeqNo,
      * This method should only be invoked if Segment Replication or Remote Store is not enabled.
      */
     public Translog.Snapshot getHistoryOperationsFromTranslog(long startingSeqNo, long endSeqNo) throws IOException {
-        assert (indexSettings.isSegRepEnabled() || indexSettings.isRemoteStoreEnabled()) == false
+        assert indexSettings.isSegRepEnabledOrRemoteNode() == false
             : "unsupported operation for segment replication enabled indices or remote store backed indices";
         return getEngine().translogManager().newChangesSnapshot(startingSeqNo, endSeqNo, true);
     }
@@ -3067,7 +3166,7 @@ public Set<SegmentReplicationShardStats> getReplicationStatsForTrackedReplicas()
     }

     public ReplicationStats getReplicationStats() {
-        if (indexSettings.isSegRepEnabled() && routingEntry().primary()) {
+        if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary()) {
             final Set<SegmentReplicationShardStats> stats = getReplicationStatsForTrackedReplicas();
             long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L);
             long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum();
@@ -3422,7 +3521,7 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S
      * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint.
      */
         assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED)
-            || indexSettings.isRemoteTranslogStoreEnabled() : "supposedly in-sync shard copy received a global checkpoint ["
+            || indexSettings.isAssignedOnRemoteNode() : "supposedly in-sync shard copy received a global checkpoint ["
                 + globalCheckpoint
                 + "] "
                 + "that is higher than its local checkpoint ["
@@ -3446,7 +3545,14 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p
             + "] does not contain relocation target ["
             + routingEntry()
             + "]";
-        assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint()
+        String allocationId = routingEntry().allocationId().getId();
+        if (isRemoteStoreEnabled() || isMigratingToRemote()) {
+            // For remote backed indexes, the old primary may not have the updated value of the local checkpoint of the new primary.
+            // But the new primary is always updated with data in the remote store and is on par with the old primary.
+            // So we can use a stricter check where the local checkpoint of the new primary is checked against that of the old primary.
+            allocationId = primaryContext.getRoutingTable().primaryShard().allocationId().getId();
+        }
+        assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(allocationId).getLocalCheckpoint()
             || indexSettings().getTranslogDurability() == Durability.ASYNC : "local checkpoint ["
                 + getLocalCheckpoint()
                 + "] does not match checkpoint from primary context ["
@@ -3459,7 +3565,7 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingE
     }

     private void postActivatePrimaryMode() {
-        if (indexSettings.isRemoteStoreEnabled()) {
+        if (indexSettings.isAssignedOnRemoteNode()) {
             // We make sure to upload translog (even if it does not contain any operations) to remote translog.
             // This helps to get a consistent state in remote store where both remote segment store and remote
             // translog contains data.
@@ -3846,14 +3952,14 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro
         internalRefreshListener.clear();
         internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric));
-        if (indexSettings.isSegRepEnabled()) {
+        if (indexSettings.isSegRepEnabledOrRemoteNode()) {
             internalRefreshListener.add(new ReplicationCheckpointUpdater());
         }
         if (this.checkpointPublisher != null && shardRouting.primary() && indexSettings.isSegRepLocalEnabled()) {
             internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher));
         }
-        if (isRemoteStoreEnabled()) {
+        if (isRemoteStoreEnabled() || isMigratingToRemote()) {
             internalRefreshListener.add(
                 new RemoteStoreRefreshListener(
                     this,
@@ -3867,10 +3973,15 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro
         With segment replication enabled for primary relocation, recover replica shard initially as read only and change to a writeable
         engine during relocation handoff after a round of segment replication.
*/ - boolean isReadOnlyReplica = indexSettings.isSegRepEnabled() + boolean isReadOnlyReplica = indexSettings.isSegRepEnabledOrRemoteNode() && (shardRouting.primary() == false || (shardRouting.isRelocationTarget() && recoveryState.getStage() != RecoveryState.Stage.FINALIZE)); + // For mixed mode, when relocating from doc rep to remote node, we use a writeable engine + if (shouldSeedRemoteStore()) { + isReadOnlyReplica = false; + } + return this.engineConfigFactory.newEngineConfig( shardId, threadPool, @@ -3895,7 +4006,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro this::getOperationPrimaryTerm, tombstoneDocSupplier(), isReadOnlyReplica, - this::isStartedPrimary, + this::enableUploadToRemoteTranslog, translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for // timeseries @@ -3907,7 +4018,7 @@ private boolean isRemoteStoreEnabled() { } public boolean isRemoteTranslogEnabled() { - return indexSettings() != null && indexSettings().isRemoteTranslogStoreEnabled(); + return indexSettings() != null && (indexSettings().isRemoteTranslogStoreEnabled()); } /** @@ -3916,7 +4027,24 @@ public boolean isRemoteTranslogEnabled() { * translog uploads. */ public boolean isStartedPrimary() { - return getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED; + return (getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED); + } + + public boolean enableUploadToRemoteTranslog() { + return isStartedPrimary() || (shouldSeedRemoteStore() && hasOneRemoteSegmentSyncHappened()); + } + + private boolean hasOneRemoteSegmentSyncHappened() { + assert indexSettings.isAssignedOnRemoteNode(); + // We upload remote translog only after one remote segment upload in case of migration + RemoteSegmentStoreDirectory rd = getRemoteDirectory(); + AtomicBoolean segment_n_uploaded = new AtomicBoolean(false); + rd.getSegmentsUploadedToRemoteStore().forEach((key, value) -> { + if (key.startsWith("segments")) { + segment_n_uploaded.set(true); + } + }); + return segment_n_uploaded.get(); } /** @@ -4229,7 +4357,7 @@ private void innerAcquireReplicaOperationPermit( ); // With Segment Replication enabled, we never want to reset a replica's engine unless // it is promoted to primary. - if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabled() == false) { + if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabledOrRemoteNode() == false) { resetEngineToGlobalCheckpoint(); } else { getEngine().translogManager().rollTranslogGeneration(); @@ -4521,10 +4649,10 @@ public final boolean isSearchIdle() { public final boolean isSearchIdleSupported() { // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh // task continues to upload to remote store periodically. - if (isRemoteTranslogEnabled()) { + if (isRemoteTranslogEnabled() || indexSettings.isAssignedOnRemoteNode()) { return false; } - return indexSettings.isSegRepEnabled() == false || indexSettings.getNumberOfReplicas() == 0; + return indexSettings.isSegRepEnabledOrRemoteNode() == false || indexSettings.getNumberOfReplicas() == 0; } /** @@ -4587,9 +4715,14 @@ public void afterRefresh(boolean didRefresh) { * true if the listener was registered to wait for a refresh. 
*/ public final void awaitShardSearchActive(Consumer listener) { + boolean isSearchIdle = isSearchIdle(); markSearcherAccessed(); // move the shard into non-search idle final Translog.Location location = pendingRefreshLocation.get(); if (location != null) { + if (isSearchIdle) { + SearchOperationListener searchOperationListener = getSearchOperationListener(); + searchOperationListener.onSearchIdleReactivation(); + } addRefreshListener(location, (b) -> { pendingRefreshLocation.compareAndSet(location, null); listener.accept(true); @@ -4786,10 +4919,10 @@ public void close() throws IOException { } }; IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded()) { syncSegmentsFromRemoteSegmentStore(false); } - if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { + if ((indexSettings.isRemoteTranslogStoreEnabled() || this.isRemoteSeeded()) && shardRouting.primary()) { syncRemoteTranslogAndUpdateGlobalCheckpoint(); } newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker))); @@ -4808,7 +4941,9 @@ public void close() throws IOException { // of truth for translog, we play all translogs that exists locally. Otherwise, the recoverUpto happens upto global checkpoint. // We also replay all local translog ops with Segment replication, because on engine swap our local translog may // hold more ops than the global checkpoint. - long recoverUpto = this.isRemoteTranslogEnabled() || indexSettings().isSegRepEnabled() ? Long.MAX_VALUE : globalCheckpoint; + long recoverUpto = this.isRemoteTranslogEnabled() || indexSettings().isSegRepEnabledOrRemoteNode() + ? Long.MAX_VALUE + : globalCheckpoint; newEngineReference.get() .translogManager() .recoverFromTranslog(translogRunner, newEngineReference.get().getProcessedLocalCheckpoint(), recoverUpto); @@ -4834,14 +4969,31 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool()); + RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy()); + } + + /* + Cleans up remote store and remote translog contents. 
+ This is used in remote store migration, where we want to clean up all stale segment and translog data + and seed the remote store afresh + */ + public void deleteRemoteStoreContents() throws IOException { + deleteTranslogFilesFromRemoteTranslog(); + getRemoteDirectory().deleteStaleSegments(0); } public void syncTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.download(repository, shardId, getThreadPool(), shardPath().resolveTranslog(), logger); + RemoteFsTranslog.download( + repository, + shardId, + getThreadPool(), + shardPath().resolveTranslog(), + indexSettings.getRemoteStorePathStrategy(), + logger + ); } /** @@ -4862,7 +5014,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOE public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { boolean syncSegmentSuccess = false; long startTimeMs = System.currentTimeMillis(); - assert indexSettings.isRemoteStoreEnabled(); + assert indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded(); logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that @@ -5127,4 +5279,20 @@ private TimeValue getRemoteTranslogUploadBufferInterval(Supplier clus public AsyncIOProcessor getTranslogSyncProcessor() { return translogSyncProcessor; } + + enum ShardMigrationState { + REMOTE_NON_MIGRATING, + REMOTE_MIGRATING_SEEDED, + REMOTE_MIGRATING_UNSEEDED, + DOCREP_NON_MIGRATING + } + + static ShardMigrationState getShardMigrationState(IndexSettings indexSettings, boolean shouldSeed) { + if (indexSettings.isAssignedOnRemoteNode() && indexSettings.isRemoteStoreEnabled()) { + return REMOTE_NON_MIGRATING; + } else if (indexSettings.isAssignedOnRemoteNode()) { + return shouldSeed ? REMOTE_MIGRATING_UNSEEDED : REMOTE_MIGRATING_SEEDED; + } + return ShardMigrationState.DOCREP_NON_MIGRATING; + } } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 7bb80b736693f..351aec6e3af6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -224,7 +224,7 @@ private boolean syncSegments() { // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. if (isRefreshAfterCommit()) { - remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles()); + remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles()); } try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { @@ -528,7 +528,7 @@ private void initializeRemoteDirectoryOnTermUpdate() throws IOException { * @return true iff the shard is a started with primary mode true or it is local or snapshot recovery. 
      */
     private boolean isReadyForUpload() {
-        boolean isReady = indexShard.isStartedPrimary() || isLocalOrSnapshotRecovery();
+        boolean isReady = indexShard.isStartedPrimary() || isLocalOrSnapshotRecovery() || indexShard.shouldSeedRemoteStore();

         if (isReady == false) {
             StringBuilder sb = new StringBuilder("Skipped syncing segments with");
diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java
index 849a4f9c15318..94079db468f9c 100644
--- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java
@@ -145,6 +145,11 @@ default void onNewPitContext(ReaderContext readerContext) {}
      */
     default void onFreePitContext(ReaderContext readerContext) {}

+    /**
+     * Executed when a shard goes from search idle to a non-idle state.
+     */
+    default void onSearchIdleReactivation() {}
+
     /**
      * A Composite listener that multiplexes calls to each of the listeners methods.
      */
@@ -310,5 +315,16 @@ public void onFreePitContext(ReaderContext readerContext) {
                 }
             }
         }
+
+        @Override
+        public void onSearchIdleReactivation() {
+            for (SearchOperationListener listener : listeners) {
+                try {
+                    listener.onSearchIdleReactivation();
+                } catch (Exception e) {
+                    logger.warn(() -> new ParameterizedMessage("onSearchIdleReactivation listener [{}] failed", listener), e);
+                }
+            }
+        }
     }
 }
diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
index 3faef2da05320..c74ab5e24a980 100644
--- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
@@ -38,13 +38,11 @@
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.opensearch.ExceptionsHelper;
-import org.opensearch.OpenSearchException;
 import org.opensearch.action.StepListener;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
@@ -60,6 +58,8 @@
 import org.opensearch.index.engine.Engine;
 import org.opensearch.index.engine.EngineException;
 import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.remote.RemoteStoreEnums.PathType;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
 import org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.snapshots.IndexShardRestoreFailedException;
 import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot;
@@ -194,7 +194,7 @@ void recoverFromLocalShards(
         // copied segments - we will also see them in stats etc.
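Consumers pick up the new hook by overriding the default no-op; the composite listener above already isolates failures, so one misbehaving listener cannot break the rest. A minimal, hypothetical implementation that counts idle-to-active transitions might look like this:

```java
import org.opensearch.common.metrics.CounterMetric;
import org.opensearch.index.shard.SearchOperationListener;

// Sketch: a listener that counts idle-to-active shard transitions, e.g. to spot
// workloads that repeatedly pay the search-idle refresh penalty. Hypothetical class.
public class IdleWakeupCountingListener implements SearchOperationListener {
    private final CounterMetric wakeups = new CounterMetric();

    @Override
    public void onSearchIdleReactivation() {
        wakeups.inc(); // invoked when a shard leaves search idle due to an incoming search
    }

    public long wakeupCount() {
        return wakeups.count();
    }
}
```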
indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { - waitForRemoteStoreSync(indexShard); + indexShard.waitForRemoteStoreSync(); if (indexShard.isRemoteSegmentStoreInSync() == false) { throw new IndexShardRecoveryException( indexShard.shardId(), @@ -411,7 +411,9 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - shardId + shardId, + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); sourceRemoteDirectory.initializeToSpecificCommit( primaryTerm, @@ -436,7 +438,7 @@ void recoverFromSnapshotAndRemoteStore( indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { - waitForRemoteStoreSync(indexShard); + indexShard.waitForRemoteStoreSync(); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -722,7 +724,7 @@ private void restore( indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { - waitForRemoteStoreSync(indexShard); + indexShard.waitForRemoteStoreSync(); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -796,31 +798,4 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO ); store.associateIndexWithNewTranslog(translogUUID); } - - /* - Blocks the calling thread, waiting for the remote store to get synced till internal Remote Upload Timeout - */ - private void waitForRemoteStoreSync(IndexShard indexShard) { - if (indexShard.shardRouting.primary() == false) { - return; - } - long startNanos = System.nanoTime(); - - while (System.nanoTime() - startNanos < indexShard.getRecoverySettings().internalRemoteUploadTimeout().nanos()) { - try { - if (indexShard.isRemoteSegmentStoreInSync()) { - break; - } else { - try { - Thread.sleep(TimeValue.timeValueMinutes(1).seconds()); - } catch (InterruptedException ie) { - throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie); - } - } - } catch (AlreadyClosedException e) { - // There is no point in waiting as shard is now closed . 
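The waitForRemoteStoreSync helper removed here (the diff relocates it onto IndexShard) is a bounded poll: keep re-checking the sync condition until it holds or the internal remote upload timeout lapses. A generic sketch of that shape, with an illustrative supplier and poll interval in place of isRemoteSegmentStoreInSync() and the recovery settings:

    import java.util.function.BooleanSupplier;

    final class BoundedWait {
        // Re-check a condition until it holds or a deadline passes.
        static boolean waitUntil(BooleanSupplier condition, long timeoutNanos, long pollMillis) throws InterruptedException {
            long start = System.nanoTime();
            while (System.nanoTime() - start < timeoutNanos) {
                if (condition.getAsBoolean()) {
                    return true; // synced within the allowed window
                }
                Thread.sleep(pollMillis); // back off before re-checking
            }
            return false; // caller decides whether to fail the recovery
        }
    }

When the window expires without isRemoteSegmentStoreInSync() returning true, the call sites above fail the recovery with IndexShardRecoveryException or IndexShardRestoreFailedException.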
- return; - } - } - } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index c9a238c6e3350..ec1163fe91b6c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -31,6 +31,7 @@ import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -897,13 +898,15 @@ public static void remoteDirectoryCleanup( RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, String remoteStoreRepoForIndex, String indexUUID, - ShardId shardId + ShardId shardId, + RemoteStorePathStrategy pathStrategy ) { try { RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + pathStrategy ); remoteSegmentStoreDirectory.deleteStaleSegments(0); remoteSegmentStoreDirectory.deleteIfEmpty(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index eca8d9ec702e1..e462f6d4ac011 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -9,9 +9,11 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; @@ -23,16 +25,20 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Objects; import java.util.function.Supplier; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; + /** * Factory for a remote store directory * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.DirectoryFactory { - private static final String SEGMENTS = "segments"; - private final Supplier repositoriesService; private final ThreadPool threadPool; @@ -46,29 +52,52 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - return newDirectory(repositoryName, indexUUID, path.getShardId()); + return newDirectory(repositoryName, indexUUID, path.getShardId(), 
indexSettings.getRemoteStorePathStrategy()); } - public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException { + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) + throws IOException { + assert Objects.nonNull(pathStrategy); try (Repository repository = repositoriesService.get().repository(repositoryName)) { + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); - BlobPath commonBlobPath = blobStoreRepository.basePath(); - commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS); + BlobPath repositoryBasePath = blobStoreRepository.basePath(); + String shardIdStr = String.valueOf(shardId.id()); + RemoteStorePathStrategy.PathInput dataPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(SEGMENTS) + .dataType(DATA) + .build(); + // Derive the path for data directory of SEGMENTS + BlobPath dataPath = pathStrategy.generatePath(dataPathInput); RemoteDirectory dataDirectory = new RemoteDirectory( - blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), + blobStoreRepository.blobStore().blobContainer(dataPath), blobStoreRepository::maybeRateLimitRemoteUploadTransfers, blobStoreRepository::maybeRateLimitRemoteDownloadTransfers ); - RemoteDirectory metadataDirectory = new RemoteDirectory( - blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("metadata")) - ); + + RemoteStorePathStrategy.PathInput mdPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(SEGMENTS) + .dataType(METADATA) + .build(); + // Derive the path for metadata directory of SEGMENTS + BlobPath mdPath = pathStrategy.generatePath(mdPathInput); + RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath)); + + // The path for lock is derived within the RemoteStoreLockManagerFactory RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( repositoriesService.get(), repositoryName, indexUUID, - String.valueOf(shardId.id()) + shardIdStr, + pathStrategy ); return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 1930a37daa400..56fc5b1ffa90d 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -385,7 +385,7 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio * @return {@link Map} map file name to {@link StoreFileMetadata}. */ public Map getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode(); failIfCorrupted(); try { return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; @@ -893,7 +893,7 @@ public void beforeClose() { * @throws IOException when there is an IO error committing. 
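The directory factory above no longer concatenates blob path segments by hand; both the data and metadata directories are resolved through RemoteStorePathStrategy.generatePath from a PathInput. Assuming the default FIXED type reproduces the legacy layout that the removed add() chain built, the resulting paths look like this (a toy sketch, not the real BlobPath API):

    // Toy sketch of the FIXED layout: <base>/<indexUUID>/<shardId>/<category>/<type>.
    final class FixedPathSketch {
        static String generatePath(String basePath, String indexUUID, String shardId, String dataCategory, String dataType) {
            return String.join("/", basePath, indexUUID, shardId, dataCategory, dataType);
        }

        public static void main(String[] args) {
            // e.g. repo-base/some-index-uuid/0/segments/data for segment data ...
            System.out.println(generatePath("repo-base", "some-index-uuid", "0", "segments", "data"));
            // ... and repo-base/some-index-uuid/0/segments/metadata for segment metadata
            System.out.println(generatePath("repo-base", "some-index-uuid", "0", "segments", "metadata"));
        }
    }

Centralizing path construction in a strategy lets alternative prefix schemes be introduced later without touching each call site, which is why the new pathStrategy parameter is threaded through the directory, lock manager, and translog factories.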
*/ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, long processedCheckpoint) throws IOException { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode() || indexSettings.isAssignedOnRemoteNode(); metadataLock.writeLock().lock(); try { final Map userData = new HashMap<>(latestSegmentInfos.getUserData()); diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index 00666ada11983..45d466d3a8ce8 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -11,15 +11,18 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import java.io.IOException; import java.util.function.Supplier; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; + /** * Factory for remote store lock manager * @@ -27,34 +30,42 @@ */ @PublicApi(since = "2.8.0") public class RemoteStoreLockManagerFactory { - private static final String SEGMENTS = "segments"; - private static final String LOCK_FILES = "lock_files"; private final Supplier repositoriesService; public RemoteStoreLockManagerFactory(Supplier repositoriesService) { this.repositoriesService = repositoriesService; } - public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { - return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId); + public RemoteStoreLockManager newLockManager( + String repositoryName, + String indexUUID, + String shardId, + RemoteStorePathStrategy pathStrategy + ) { + return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathStrategy); } public static RemoteStoreMetadataLockManager newLockManager( RepositoriesService repositoriesService, String repositoryName, String indexUUID, - String shardId - ) throws IOException { + String shardId, + RemoteStorePathStrategy pathStrategy + ) { try (Repository repository = repositoriesService.repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath shardLevelBlobPath = ((BlobStoreRepository) repository).basePath().add(indexUUID).add(shardId).add(SEGMENTS); - RemoteBufferedOutputDirectory shardMDLockDirectory = createRemoteBufferedOutputDirectory( - repository, - shardLevelBlobPath, - LOCK_FILES - ); + BlobPath repositoryBasePath = ((BlobStoreRepository) repository).basePath(); - return new RemoteStoreMetadataLockManager(shardMDLockDirectory); + RemoteStorePathStrategy.PathInput lockFilesPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(SEGMENTS) + .dataType(LOCK_FILES) + 
.build(); + BlobPath lockDirectoryPath = pathStrategy.generatePath(lockFilesPathInput); + BlobContainer lockDirectoryBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(lockDirectoryPath); + return new RemoteStoreMetadataLockManager(new RemoteBufferedOutputDirectory(lockDirectoryBlobContainer)); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be present to acquire/release lock", e); } @@ -65,14 +76,4 @@ public static RemoteStoreMetadataLockManager newLockManager( public Supplier getRepositoriesService() { return repositoriesService; } - - private static RemoteBufferedOutputDirectory createRemoteBufferedOutputDirectory( - Repository repository, - BlobPath commonBlobPath, - String extention - ) { - BlobPath extendedPath = commonBlobPath.add(extention); - BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); - return new RemoteBufferedOutputDirectory(dataBlobContainer); - } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 7b969a37e4aa6..bb7769eae1bf5 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; @@ -18,6 +19,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; @@ -38,6 +40,7 @@ import java.util.HashSet; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -47,6 +50,10 @@ import java.util.function.LongConsumer; import java.util.function.LongSupplier; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; + /** * A Translog implementation which syncs local FS with a remote store * The current impl uploads translog , ckp and metadata to remote store @@ -74,7 +81,6 @@ public class RemoteFsTranslog extends Translog { private static final int REMOTE_DELETION_PERMITS = 2; private static final int DOWNLOAD_RETRIES = 2; - public static final String TRANSLOG = "translog"; // Semaphore used to allow only single remote generation to happen at a time private final Semaphore remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS); @@ -106,7 +112,8 @@ public RemoteFsTranslog( threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + indexSettings().getRemoteStorePathStrategy() ); try { download(translogTransferManager, location, logger); @@ -150,8 +157,14 @@ RemoteTranslogTransferTracker 
getRemoteTranslogTracker() { return remoteTranslogTransferTracker; } - public static void download(Repository repository, ShardId shardId, ThreadPool threadPool, Path location, Logger logger) - throws IOException { + public static void download( + Repository repository, + ShardId shardId, + ThreadPool threadPool, + Path location, + RemoteStorePathStrategy pathStrategy, + Logger logger + ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( Locale.ROOT, "%s repository should be instance of BlobStoreRepository", @@ -167,7 +180,8 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + pathStrategy ); RemoteFsTranslog.download(translogTransferManager, location, logger); logger.trace(remoteTranslogTransferTracker.toString()); @@ -244,15 +258,30 @@ public static TranslogTransferManager buildTranslogTransferManager( ThreadPool threadPool, ShardId shardId, FileTransferTracker fileTransferTracker, - RemoteTranslogTransferTracker remoteTranslogTransferTracker + RemoteTranslogTransferTracker tracker, + RemoteStorePathStrategy pathStrategy ) { - return new TranslogTransferManager( - shardId, - new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), - blobStoreRepository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG), - fileTransferTracker, - remoteTranslogTransferTracker - ); + assert Objects.nonNull(pathStrategy); + String indexUUID = shardId.getIndex().getUUID(); + String shardIdStr = String.valueOf(shardId.id()); + RemoteStorePathStrategy.PathInput dataPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(blobStoreRepository.basePath()) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(TRANSLOG) + .dataType(DATA) + .build(); + BlobPath dataPath = pathStrategy.generatePath(dataPathInput); + RemoteStorePathStrategy.PathInput mdPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(blobStoreRepository.basePath()) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(TRANSLOG) + .dataType(METADATA) + .build(); + BlobPath mdPath = pathStrategy.generatePath(mdPathInput); + BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); + return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker); } @Override @@ -334,7 +363,7 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc } private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws IOException { - logger.trace("uploading translog for {} {}", primaryTerm, generation); + logger.trace("uploading translog for primary term {} generation {}", primaryTerm, generation); try ( TranslogCheckpointTransferSnapshot transferSnapshotProvider = new TranslogCheckpointTransferSnapshot.Builder( primaryTerm, @@ -524,7 +553,8 @@ private void deleteStaleRemotePrimaryTerms() { } } - public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy) + throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; // We use a dummy 
stats tracker to ensure the flow doesn't break. @@ -536,7 +566,8 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + pathStrategy ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 9f877e87415dd..7c50ed6ecd58f 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -525,7 +525,7 @@ TranslogWriter createWriter( tragedy, persistedSequenceNumberConsumer, bigArrays, - indexSettings.isRemoteTranslogStoreEnabled() + indexSettings.isAssignedOnRemoteNode() ); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 2f6055df87804..c9e07ca3ef8c1 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -58,7 +58,6 @@ public class TranslogTransferManager { private final TransferService transferService; private final BlobPath remoteDataTransferPath; private final BlobPath remoteMetadataTransferPath; - private final BlobPath remoteBaseTransferPath; private final FileTransferTracker fileTransferTracker; private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; @@ -67,8 +66,6 @@ public class TranslogTransferManager { private static final int METADATA_FILES_TO_FETCH = 10; private final Logger logger; - private final static String METADATA_DIR = "metadata"; - private final static String DATA_DIR = "data"; private static final VersionedCodecStreamWrapper metadataStreamWrapper = new VersionedCodecStreamWrapper<>( new TranslogTransferMetadataHandler(), @@ -79,15 +76,15 @@ public class TranslogTransferManager { public TranslogTransferManager( ShardId shardId, TransferService transferService, - BlobPath remoteBaseTransferPath, + BlobPath remoteDataTransferPath, + BlobPath remoteMetadataTransferPath, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { this.shardId = shardId; this.transferService = transferService; - this.remoteBaseTransferPath = remoteBaseTransferPath; - this.remoteDataTransferPath = remoteBaseTransferPath.add(DATA_DIR); - this.remoteMetadataTransferPath = remoteBaseTransferPath.add(METADATA_DIR); + this.remoteDataTransferPath = remoteDataTransferPath; + this.remoteMetadataTransferPath = remoteMetadataTransferPath; this.fileTransferTracker = fileTransferTracker; this.logger = Loggers.getLogger(getClass(), shardId); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; @@ -456,17 +453,27 @@ public void onFailure(Exception e) { ); } + /** + * Deletes all the translog content related to the underlying shard. + */ public void delete() { - // cleans up all the translog contents in async fashion - transferService.deleteAsync(ThreadPool.Names.REMOTE_PURGE, remoteBaseTransferPath, new ActionListener<>() { + // Delete the translog data content from the remote store. 
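With translog data and metadata now living under two independently derived blob paths, the reworked delete() purges each path with its own async call and listener instead of one recursive delete of a shared base path. A compact sketch of that fan-out, with a hypothetical stand-in for the async TransferService:

    import java.util.function.Consumer;

    // Hypothetical stand-in for the async transfer service used above.
    interface TransferServiceSketch {
        void deleteAsync(String executor, String path, Runnable onDone, Consumer<Exception> onFailure);
    }

    final class TranslogPurgeSketch {
        // Purge the data and metadata paths independently.
        static void deleteAll(TransferServiceSketch service, String dataPath, String metadataPath) {
            for (String path : new String[] { dataPath, metadataPath }) {
                service.deleteAsync(
                    "remote_purge",
                    path,
                    () -> System.out.println("Deleted all remote translog data at path=" + path),
                    e -> System.err.println("Exception occurred while cleaning translog at path=" + path + ": " + e)
                );
            }
        }
    }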
+ delete(remoteDataTransferPath); + // Delete the translog metadata content from the remote store. + delete(remoteMetadataTransferPath); + } + + private void delete(BlobPath path) { + // cleans up all the translog contents in async fashion for the given path + transferService.deleteAsync(ThreadPool.Names.REMOTE_PURGE, path, new ActionListener<>() { @Override public void onResponse(Void unused) { - logger.info("Deleted all remote translog data"); + logger.info("Deleted all remote translog data at path={}", path); } @Override public void onFailure(Exception e) { - logger.error("Exception occurred while cleaning translog", e); + logger.error(new ParameterizedMessage("Exception occurred while cleaning translog at path={}", path), e); } }); } diff --git a/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java new file mode 100644 index 0000000000000..d3937600a848b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RemoteStoreSettings} instance containing all defaults + * + * @opensearch.internal + */ +public final class DefaultRemoteStoreSettings { + private DefaultRemoteStoreSettings() {} + + public static final RemoteStoreSettings INSTANCE = new RemoteStoreSettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index b86e98f4ebcbc..fee2888c7a3fb 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -46,6 +46,7 @@ import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.BooleanFieldMapper; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.DocCountFieldMapper; @@ -168,6 +169,7 @@ public static Map getMappers(List mappe mappers.put(FieldAliasMapper.CONTENT_TYPE, new FieldAliasMapper.TypeParser()); mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); mappers.put(FlatObjectFieldMapper.CONTENT_TYPE, FlatObjectFieldMapper.PARSER); + mappers.put(ConstantKeywordFieldMapper.CONTENT_TYPE, new ConstantKeywordFieldMapper.TypeParser()); for (MapperPlugin mapperPlugin : mapperPlugins) { for (Map.Entry entry : mapperPlugin.getMappers().entrySet()) { diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 0e64894e6f708..4f68c03913199 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -54,6 +54,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import 
org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; @@ -123,7 +124,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -150,6 +151,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; @@ -201,6 +203,7 @@ import static org.opensearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.opensearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION; import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent; import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; /** @@ -247,17 +250,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * Used to specify the default translog buffer interval for remote store backed indexes. - */ - public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( - "cluster.remote_store.translog.buffer_interval", - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, - Property.NodeScope, - Property.Dynamic - ); - /** * This setting is used to set the refresh interval when the {@code index.refresh_interval} index setting is not * provided during index creation or when the existing {@code index.refresh_interval} index setting is set as null. @@ -318,10 +310,10 @@ public class IndicesService extends AbstractLifecycleComponent * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for * remote store enabled cluster. 
*/ - public static final Setting CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( + public static final Setting CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( "cluster.remote_store.index.path.prefix.type", - RemoteStorePathType.FIXED.toString(), - RemoteStorePathType::parseString, + PathType.FIXED.toString(), + PathType::parseString, Property.NodeScope, Property.Dynamic ); @@ -364,7 +356,7 @@ public class IndicesService extends AbstractLifecycleComponent private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; private final RecoverySettings recoverySettings; - + private final RemoteStoreSettings remoteStoreSettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); @@ -373,8 +365,6 @@ public class IndicesService extends AbstractLifecycleComponent private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; - private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final SearchRequestStats searchRequestStats; @Override @@ -409,7 +399,8 @@ public IndicesService( SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, RecoverySettings recoverySettings, - CacheService cacheService + CacheService cacheService, + RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.threadPool = threadPool; @@ -503,15 +494,18 @@ protected void closeInternal() { this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; - this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool, remoteStoreStatsTrackerFactory); + this.translogFactorySupplier = getTranslogFactorySupplier( + repositoriesServiceSupplier, + threadPool, + remoteStoreStatsTrackerFactory, + settings + ); this.searchRequestStats = searchRequestStats; this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; } /** @@ -533,7 +527,8 @@ private void onRefreshIntervalUpdate(TimeValue clusterDefaultRefreshInterval) { private static BiFunction getTranslogFactorySupplier( Supplier repositoriesServiceSupplier, ThreadPool threadPool, - RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + Settings settings ) { return (indexSettings, shardRouting) -> { if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { @@ -543,6 +538,13 @@ private static BiFunction getTrans indexSettings.getRemoteStoreTranslogRepository(), 
remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) ); + } else if (isRemoteDataAttributePresent(settings) && shardRouting.primary()) { + return new RemoteBlobStoreInternalTranslogFactory( + repositoriesServiceSupplier, + threadPool, + RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(indexSettings.getNodeSettings()), + remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) + ); } return new InternalTranslogFactory(); }; @@ -908,8 +910,8 @@ private synchronized IndexService createIndexService( remoteDirectoryFactory, translogFactorySupplier, this::getClusterDefaultRefreshInterval, - this::getClusterRemoteTranslogBufferInterval, - this.recoverySettings + this.recoverySettings, + this.remoteStoreSettings ); } @@ -932,7 +934,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isRemoteSnapshot()) { return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); } - if (idxSettings.isSegRepEnabled()) { + if (idxSettings.isSegRepEnabledOrRemoteNode() || idxSettings.isAssignedOnRemoteNode()) { return new NRTReplicationEngineFactory(); } return new InternalEngineFactory(); @@ -1020,7 +1022,8 @@ public IndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final DiscoveryNodes discoveryNodes ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -1032,7 +1035,11 @@ public IndexShard createShard( globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + repositoriesService, + targetNode, + sourceNode, + discoveryNodes ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { @@ -2026,12 +2033,7 @@ private TimeValue getClusterDefaultRefreshInterval() { return this.clusterDefaultRefreshInterval; } - // Exclusively for testing, please do not use it elsewhere. - public TimeValue getClusterRemoteTranslogBufferInterval() { - return clusterRemoteTranslogBufferInterval; - } - - private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { - this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + public RemoteStoreSettings getRemoteStoreSettings() { + return this.remoteStoreSettings; } } diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java new file mode 100644 index 0000000000000..5e6dba2b398db --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexSettings; + +/** + * Settings for remote store + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class RemoteStoreSettings { + + /** + * Used to specify the default translog buffer interval for remote store backed indexes. + */ + public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( + "cluster.remote_store.translog.buffer_interval", + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, + Property.NodeScope, + Property.Dynamic + ); + + /** + * Controls minimum number of metadata files to keep in remote segment store. + * {@code value < 1} will disable deletion of stale segment metadata files. + */ + public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( + "cluster.remote_store.index.segment_metadata.retention.max_count", + 10, + -1, + v -> { + if (v == 0) { + throw new IllegalArgumentException( + "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" + ); + } + }, + Property.NodeScope, + Property.Dynamic + ); + + private volatile TimeValue clusterRemoteTranslogBufferInterval; + private volatile int minRemoteSegmentMetadataFiles; + + public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + this::setClusterRemoteTranslogBufferInterval + ); + + minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + this::setMinRemoteSegmentMetadataFiles + ); + } + + // Exclusively for testing, please do not use it elsewhere. 
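RemoteStoreSettings follows the standard dynamic-setting pattern: read the initial value, validate it, and register an update consumer so the live value can change without a node restart. The validator rejects 0 because retaining zero metadata files would delete everything in the remote segment store, while -1 disables stale-metadata deletion entirely. A toy sketch of that wiring; the class below stands in for Setting plus ClusterSettings and is not the real API:

    import java.util.function.IntConsumer;

    final class DynamicIntSetting {
        private volatile int value;
        private final IntConsumer validator;

        DynamicIntSetting(int initial, IntConsumer validator) {
            validator.accept(initial); // validate the startup value too
            this.validator = validator;
            this.value = initial;
        }

        int get() {
            return value;
        }

        void update(int newValue) { // invoked by the (toy) cluster-settings registry
            validator.accept(newValue);
            this.value = newValue;
        }

        public static void main(String[] args) {
            DynamicIntSetting retention = new DynamicIntSetting(10, v -> {
                if (v == 0) throw new IllegalArgumentException("Value 0 is not allowed for this setting");
            });
            retention.update(-1); // -1 disables stale segment metadata deletion
            System.out.println(retention.get());
        }
    }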
+ public TimeValue getClusterRemoteTranslogBufferInterval() { + return clusterRemoteTranslogBufferInterval; + } + + private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { + this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + } + + private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { + this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; + } + + public int getMinRemoteSegmentMetadataFiles() { + return this.minRemoteSegmentMetadataFiles; + } +} diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 7fb8b172ae352..2c3ffcdd9e0ba 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -680,7 +680,8 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR retentionLeaseSyncer, nodes.getLocalNode(), sourceNode, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + nodes ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); @@ -714,7 +715,8 @@ private void updateShard( primaryReplicaSyncer::resync, clusterState.version(), inSyncIds, - indexShardRoutingTable + indexShardRoutingTable, + nodes ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); @@ -922,7 +924,8 @@ void updateShardState( BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable + IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException; } @@ -1040,7 +1043,8 @@ T createShard( RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, - RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + DiscoveryNodes discoveryNodes ) throws IOException; /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java index 29ee097d36cac..fac6924435cf3 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java @@ -161,7 +161,7 @@ private void innerWriteFileChunk(StoreFileMetadata fileMetadata, long position, + "] in " + Arrays.toString(store.directory().listAll()); // With Segment Replication, we will fsync after a full commit has been received. 
- if (store.indexSettings().isSegRepEnabled() == false) { + if (store.indexSettings().isSegRepEnabledOrRemoteNode() == false) { store.directory().sync(Collections.singleton(temporaryFileName)); } IndexOutput remove = removeOpenIndexOutputs(name); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index cb2bedf00de99..30f517fda9931 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -377,7 +377,7 @@ private Tuple createRecovery request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime), - shard.isRemoteTranslogEnabled() + shard.isRemoteTranslogEnabled() || request.targetNode().isRemoteStoreNode() ); handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 4232d32987e86..c24840d0c1333 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -189,7 +189,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.start( - new RecoveryTarget(indexShard, sourceNode, listener), + new RecoveryTarget(indexShard, sourceNode, listener, threadPool), recoverySettings.activityTimeout() ); // we fork off quickly here and go async but this is called from the cluster state applier thread too and that can cause @@ -246,7 +246,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled(); - if (hasRemoteSegmentStore) { + if (hasRemoteSegmentStore || indexShard.isRemoteSeeded()) { // ToDo: This is a temporary mitigation to not fail the peer recovery flow in case there is // an exception while downloading segments from remote store. For remote backed indexes, we // plan to revamp this flow so that node-node segment copy will not happen. 
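Several hunks in this recovery path reduce to boolean policy decisions: whether the target bootstraps segments from the remote store, and whether the local translog still needs verification. Collapsed into one illustrative helper, with parameters standing in for the shard and request predicates used in PeerRecoveryTargetService:

    final class RecoveryPrepSketch {
        static boolean shouldSyncSegmentsFromRemote(boolean remoteStoreEnabled, boolean remoteSeeded) {
            // Remote-backed indices, and docrep shards being re-seeded during migration,
            // both bootstrap recovery from the remote segment store.
            return remoteStoreEnabled || remoteSeeded;
        }

        static boolean shouldVerifyTranslog(boolean hasRemoteTranslog, boolean hasNoTranslog, boolean hasRemoteSegmentStore) {
            // The local translog is verified only when no remote or no-op translog path applies.
            return (hasRemoteTranslog || hasNoTranslog || hasRemoteSegmentStore) == false;
        }
    }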
@@ -260,7 +260,8 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); } } - final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); + final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false + && indexShard.indexSettings().isAssignedOnRemoteNode(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog || hasRemoteSegmentStore) == false; final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 2b41eb125d808..53b42347aa30d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -159,25 +159,6 @@ public class RecoverySettings { Property.NodeScope ); - /** - * Controls minimum number of metadata files to keep in remote segment store. - * {@code value < 1} will disable deletion of stale segment metadata files. - */ - public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( - "cluster.remote_store.index.segment_metadata.retention.max_count", - 10, - -1, - v -> { - if (v == 0) { - throw new IllegalArgumentException( - "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" - ); - } - }, - Property.NodeScope, - Property.Dynamic - ); - public static final Setting INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( "indices.recovery.internal_remote_upload_timeout", new TimeValue(1, TimeUnit.HOURS), @@ -199,7 +180,6 @@ public class RecoverySettings { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionRetryTimeout; private volatile TimeValue internalActionLongTimeout; - private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; private volatile TimeValue internalRemoteUploadTimeout; @@ -243,11 +223,6 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this::setInternalActionLongTimeout ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); - minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer( - CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, - this::setMinRemoteSegmentMetadataFiles - ); clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); } @@ -354,11 +329,4 @@ private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStre this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; } - private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { - this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; - } - - public int getMinRemoteSegmentMetadataFiles() { - return this.minRemoteSegmentMetadataFiles; - } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 7996c48b2b04b..abf9b1aaeb2cc 100644 --- 
a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -841,9 +841,11 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); - final Runnable forceSegRepRunnable = shard.indexSettings().isSegRepEnabled() - ? recoveryTarget::forceSegmentFileSync - : () -> {}; + final Runnable forceSegRepRunnable = shard.indexSettings().isSegRepEnabledOrRemoteNode() + || (request.sourceNode().isRemoteStoreNode() && request.targetNode().isRemoteStoreNode()) + ? recoveryTarget::forceSegmentFileSync + : () -> {}; + // TODO: make relocated async // this acquires all IndexShard operation permits and will thus delay new recoveries until it is done cancellableThreads.execute( @@ -855,7 +857,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis */ } else { // Force round of segment replication to update its checkpoint to primary's - if (shard.indexSettings().isSegRepEnabled()) { + if (shard.indexSettings().isSegRepEnabledOrRemoteNode()) { cancellableThreads.execute(recoveryTarget::forceSegmentFileSync); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java index ea13ca18bbfca..96e85154e6248 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java @@ -23,7 +23,7 @@ public static RecoverySourceHandler create( StartRecoveryRequest request, RecoverySettings recoverySettings ) { - boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false && shard.isRemoteTranslogEnabled(); + boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false && request.targetNode().isRemoteStoreNode(); if (isReplicaRecoveryWithRemoteTranslog) { return new RemoteStorePeerRecoverySourceHandler( shard, diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index f3b5d0d790f83..f47b082de3856 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -61,6 +61,7 @@ import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTarget; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.channels.FileChannel; @@ -87,16 +88,20 @@ public class RecoveryTarget extends ReplicationTarget implements RecoveryTargetH // latch that can be used to blockingly wait for RecoveryTarget to be closed private final CountDownLatch closedLatch = new CountDownLatch(1); + private final ThreadPool threadPool; + /** * Creates a new recovery target object that represents a recovery to the provided shard. 
* - * @param indexShard local shard where we want to recover to - * @param sourceNode source node of the recovery where we recover from - * @param listener called when recovery is completed/failed + * @param indexShard local shard where we want to recover to + * @param sourceNode source node of the recovery where we recover from + * @param listener called when recovery is completed/failed + * @param threadPool threadpool instance */ - public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, ReplicationListener listener) { + public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, ReplicationListener listener, ThreadPool threadPool) { super("recovery_status", indexShard, indexShard.recoveryState().getIndex(), listener); this.sourceNode = sourceNode; + this.threadPool = threadPool; indexShard.recoveryStats().incCurrentAsTarget(); final String tempFilePrefix = getPrefix() + UUIDs.randomBase64UUID() + "."; this.multiFileWriter = new MultiFileWriter(indexShard.store(), stateIndex, tempFilePrefix, logger, this::ensureRefCount); @@ -108,7 +113,7 @@ public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, Replicati * @return a copy of this recovery target */ public RecoveryTarget retryCopy() { - return new RecoveryTarget(indexShard, sourceNode, listener); + return new RecoveryTarget(indexShard, sourceNode, listener, threadPool); } public String source() { @@ -208,7 +213,23 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Void> listener) { ActionListener.completeWith(listener, () -> { state().getIndex().setFileDetailsComplete(); // ops-based recoveries don't send the file details state().getTranslog().totalOperations(totalTranslogOps); + // Cleanup remote contents before opening new translog. + // This prevents reading from any old Translog UUIDs during re-seeding + // (situation in which primary fails over to docrep replica and is re-seeded to remote again) + // which might end up causing a TranslogCorruptedException + if (indexShard.shouldSeedRemoteStore()) { + assert indexShard.routingEntry().primary() : "Remote seeding should only be true for primary shard copy"; + indexShard.deleteRemoteStoreContents(); + } indexShard().openEngineAndSkipTranslogRecovery(); + // upload to remote store in migration for primary shard + if (indexShard.shouldSeedRemoteStore()) { + // This cleans up remote translog's 0 generation, as we don't want to get that uploaded + indexShard.sync(); + threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> { indexShard.refresh("remote store migration"); }); + indexShard.waitForRemoteStoreSync(this::setLastAccessTime); + logger.info("Remote Store is now seeded for {}", indexShard.shardId()); + } return null; }); } @@ -360,7 +381,7 @@ public void cleanFiles( // Replicas for segment replication or remote snapshot indices do not create // their own commit points and therefore do not modify the commit user data // in their store. In these cases, reuse the primary's translog UUID.
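prepareForTranslogOperations above is where a docrep primary gets seeded to the remote store during migration, and the ordering is the important part: purge stale remote state, open the engine on a fresh translog, then upload and block until the remote store is in sync. An ordered sketch with hypothetical single-method stand-ins for the IndexShard calls:

    import java.io.IOException;

    // Hypothetical stand-ins for the IndexShard calls used above.
    interface SeedableShard {
        boolean shouldSeedRemoteStore();
        void deleteRemoteStoreContents() throws IOException; // purge stale segments and translog
        void openEngineAndSkipTranslogRecovery() throws IOException;
        void sync() throws IOException;   // drops the remote translog's 0 generation
        void refresh(String reason);      // triggers the first segment upload round
        void waitForRemoteStoreSync();    // bounded wait for the upload to complete
    }

    final class RemoteSeedingSketch {
        static void seed(SeedableShard shard) throws IOException {
            if (shard.shouldSeedRemoteStore()) {
                // Purge first so no stale translog UUID is read after a failover re-seed.
                shard.deleteRemoteStoreContents();
            }
            shard.openEngineAndSkipTranslogRecovery();
            if (shard.shouldSeedRemoteStore()) {
                shard.sync();
                shard.refresh("remote store migration"); // async on the GENERIC pool in the real code
                shard.waitForRemoteStoreSync();
            }
        }
    }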
- final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabled() + final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabledOrRemoteNode() || indexShard.indexSettings().isRemoteSnapshot(); if (reuseTranslogUUID) { final String translogUUID = store.getMetadata().getCommitUserData().get(TRANSLOG_UUID_KEY); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 852003c9f3e4d..81eb38757aebe 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -38,7 +38,7 @@ public SegmentReplicationSourceFactory( } public SegmentReplicationSource get(IndexShard shard) { - if (shard.indexSettings().isSegRepWithRemoteEnabled()) { + if (shard.indexSettings().isAssignedOnRemoteNode()) { return new RemoteStoreReplicationSource(shard); } else { return new PrimaryShardReplicationSource( diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 4062f9702fb3a..a393faabae0ea 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -175,7 +175,7 @@ public void clusterChanged(ClusterChangedEvent event) { // we need to ensure its state has cleared up in ongoing replications. if (event.routingTableChanged()) { for (IndexService indexService : indicesService) { - if (indexService.getIndexSettings().isSegRepEnabled()) { + if (indexService.getIndexSettings().isSegRepEnabledOrRemoteNode()) { for (IndexShard indexShard : indexService) { if (indexShard.routingEntry().primary()) { final IndexMetadata indexMetadata = indexService.getIndexSettings().getIndexMetadata(); @@ -221,7 +221,7 @@ protected void doClose() throws IOException { */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { - if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { + if (indexShard != null && indexShard.indexSettings().isSegRepEnabledOrRemoteNode()) { ongoingSegmentReplications.cancel(indexShard, "shard is closed"); } } @@ -231,7 +231,10 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh */ @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { - if (indexShard != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { + if (indexShard != null + && indexShard.indexSettings().isSegRepEnabledOrRemoteNode() + && oldRouting.primary() == false + && newRouting.primary()) { ongoingSegmentReplications.cancel(indexShard.routingEntry().allocationId().getId(), "Relocating primary shard."); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index f28f829545d59..4942d39cfa48a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ 
b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -168,7 +168,8 @@ protected void doClose() throws IOException { public void clusterChanged(ClusterChangedEvent event) { if (event.routingTableChanged()) { for (IndexService indexService : indicesService) { - if (indexService.getIndexSettings().isSegRepEnabled() && event.indexRoutingTableChanged(indexService.index().getName())) { + if (indexService.getIndexSettings().isSegRepEnabledOrRemoteNode() + && event.indexRoutingTableChanged(indexService.index().getName())) { for (IndexShard shard : indexService) { if (shard.routingEntry().primary() == false) { // for this shard look up its primary routing, if it has completed a relocation trigger replication @@ -197,7 +198,7 @@ public void clusterChanged(ClusterChangedEvent event) { */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { - if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { + if (indexShard != null && indexShard.indexSettings().isSegRepEnabledOrRemoteNode()) { onGoingReplications.cancelForShard(indexShard.shardId(), "Shard closing"); latestReceivedCheckpoint.remove(shardId); } @@ -209,7 +210,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh */ @Override public void afterIndexShardStarted(IndexShard indexShard) { - if (indexShard.indexSettings().isSegRepEnabled() && indexShard.routingEntry().primary() == false) { + if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.routingEntry().primary() == false) { processLatestReceivedCheckpoint(indexShard, Thread.currentThread()); } } @@ -219,7 +220,10 @@ public void afterIndexShardStarted(IndexShard indexShard) { */ @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { - if (oldRouting != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { + if (oldRouting != null + && indexShard.indexSettings().isSegRepEnabledOrRemoteNode() + && oldRouting.primary() == false + && newRouting.primary()) { onGoingReplications.cancelForShard(indexShard.shardId(), "Shard has been promoted to primary"); latestReceivedCheckpoint.remove(indexShard.shardId()); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 821ae42e31881..8f39aa194b06c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -98,7 +98,7 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.FULL_REPLICATION; } return super.getReplicationMode(indexShard); @@ -199,6 +199,12 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh Objects.requireNonNull(replica); ActionListener.completeWith(listener, () -> { logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); + // Condition for ensuring that we ignore Segrep checkpoints received on Docrep shard 
copies. + // This case will hit iff the replica hosting node is not remote enabled and replication type != SEGMENT + if (replica.indexSettings().isAssignedOnRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { + logger.trace("Received segrep checkpoint on a docrep shard copy during an ongoing remote migration. NoOp."); + return new ReplicaResult(); + } if (request.getCheckpoint().getShardId().equals(replica.shardId())) { replicationService.onNewCheckpoint(request.getCheckpoint(), replica); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index ea449afe1c811..7fa2b6c8ff497 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -153,6 +153,7 @@ import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; @@ -202,7 +203,7 @@ import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.plugins.SearchPlugin; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; @@ -788,6 +789,8 @@ protected Node( final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool @@ -825,7 +828,8 @@ protected Node( searchRequestStats, remoteStoreStatsTrackerFactory, recoverySettings, - cacheService + cacheService, + remoteStoreSettings ); final IngestService ingestService = new IngestService( @@ -946,9 +950,9 @@ protected Node( admissionControlService ); - final Collection secureTransportSettingsProviders = pluginsService.filterPlugins(Plugin.class) + final Collection secureSettingsFactories = pluginsService.filterPlugins(Plugin.class) .stream() - .map(p -> p.getSecureSettingFactory(settings).flatMap(f -> f.getSecureTransportSettingsProvider(settings))) + .map(p -> p.getSecureSettingFactory(settings)) .filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.toList()); @@ -968,7 +972,7 @@ protected Node( clusterService.getClusterSettings(), tracer, transportInterceptors, - secureTransportSettingsProviders + secureSettingsFactories ); Collection>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 7575c6ff5fb34..a3bfe1195d8cc 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -168,6 +168,14 @@ public static boolean isRemoteClusterStateAttributePresent(Settings settings) { .isEmpty() == false; } + public static 
String getRemoteStoreSegmentRepo(Settings settings) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY); + } + + public static String getRemoteStoreTranslogRepo(Settings settings) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY); + } + public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteClusterStateAttributePresent(settings); diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 679833c9f6e0d..138ef6f71280d 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -139,7 +139,7 @@ default Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, Tracer tracer ) { return Collections.emptyMap(); diff --git a/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java new file mode 100644 index 0000000000000..ff86cbc04e240 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.transport.TransportAdapterProvider; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.util.Collection; +import java.util.Collections; +import java.util.Optional; + +/** + * A provider for security related settings for HTTP transports. 
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public interface SecureHttpTransportSettingsProvider { + /** + * Collection of additional {@link TransportAdapterProvider}s that are specific to particular HTTP transport + * @param settings settings + * @return a collection of additional {@link TransportAdapterProvider}s + */ + default Collection> getHttpTransportAdapterProviders(Settings settings) { + return Collections.emptyList(); + } + + /** + * If supported, builds the {@link TransportExceptionHandler} instance for {@link HttpServerTransport} instance + * @param settings settings + * @param transport {@link HttpServerTransport} instance + * @return if supported, builds the {@link TransportExceptionHandler} instance + */ + Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport); + + /** + * If supported, builds the {@link SSLEngine} instance for {@link HttpServerTransport} instance + * @param settings settings + * @param transport {@link HttpServerTransport} instance + * @return if supported, builds the {@link SSLEngine} instance + * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + */ + Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException; +} diff --git a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java index b98d9cf51c129..ec2276ecc62ef 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java +++ b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java @@ -26,4 +26,11 @@ public interface SecureSettingsFactory { * @return optionally, the instance of the {@link SecureTransportSettingsProvider} */ Optional getSecureTransportSettingsProvider(Settings settings); + + /** + * Creates (or provides pre-created) instance of the {@link SecureHttpTransportSettingsProvider} + * @param settings settings + * @return optionally, the instance of the {@link SecureHttpTransportSettingsProvider} + */ + Optional getSecureHttpTransportSettingsProvider(Settings settings); } diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java index 6d038ed30c8ff..5b7402a01f82d 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java @@ -10,12 +10,14 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.Settings; -import org.opensearch.http.HttpServerTransport; -import org.opensearch.transport.TcpTransport; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportAdapterProvider; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; +import java.util.Collection; +import java.util.Collections; import java.util.Optional; /** @@ -26,57 +28,30 @@ @ExperimentalApi public interface SecureTransportSettingsProvider { /** - * An exception handler for errors that might happen while secure transport handle the requests. 
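// Illustrative sketch (not part of this diff): how a plugin could supply both providers through the
// single SecureSettingsFactory hook that Node.java now collects. The plugin class, the NOOP error
// handler, and the JDK-default SSLContext are assumptions made for the example.
import java.security.NoSuchAlgorithmException;
import java.util.Optional;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;

import org.opensearch.common.settings.Settings;
import org.opensearch.http.HttpServerTransport;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
import org.opensearch.plugins.SecureSettingsFactory;
import org.opensearch.plugins.SecureTransportSettingsProvider;
import org.opensearch.plugins.TransportExceptionHandler;

public class ExampleSecurityPlugin extends Plugin {
    @Override
    public Optional<SecureSettingsFactory> getSecureSettingFactory(Settings settings) {
        return Optional.of(new SecureSettingsFactory() {
            @Override
            public Optional<SecureTransportSettingsProvider> getSecureTransportSettingsProvider(Settings s) {
                return Optional.empty(); // this sketch secures only the HTTP layer
            }

            @Override
            public Optional<SecureHttpTransportSettingsProvider> getSecureHttpTransportSettingsProvider(Settings s) {
                return Optional.of(new SecureHttpTransportSettingsProvider() {
                    @Override
                    public Optional<TransportExceptionHandler> buildHttpServerExceptionHandler(Settings st, HttpServerTransport t) {
                        return Optional.of(TransportExceptionHandler.NOOP);
                    }

                    @Override
                    public Optional<SSLEngine> buildSecureHttpServerEngine(Settings st, HttpServerTransport t) throws SSLException {
                        try {
                            SSLEngine engine = SSLContext.getDefault().createSSLEngine();
                            engine.setUseClientMode(false); // server-side engine for inbound HTTP
                            return Optional.of(engine);
                        } catch (NoSuchAlgorithmException e) {
                            throw new SSLException(e);
                        }
                    }
                });
            }
        });
    }
}
// Node now gathers these factories whole (see the Node.java hunk above) instead of flat-mapping
// each one down to a single transport provider.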
- * - * @see SslExceptionHandler - * - * @opensearch.experimental - */ - @ExperimentalApi - @FunctionalInterface - interface ServerExceptionHandler { - static ServerExceptionHandler NOOP = t -> {}; - - /** - * Handler for errors happening during the server side processing of the requests - * @param t the error - */ - void onError(Throwable t); - } - - /** - * If supported, builds the {@link ServerExceptionHandler} instance for {@link HttpServerTransport} instance - * @param settings settings - * @param transport {@link HttpServerTransport} instance - * @return if supported, builds the {@link ServerExceptionHandler} instance - */ - Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport); - - /** - * If supported, builds the {@link ServerExceptionHandler} instance for {@link TcpTransport} instance + * Collection of additional {@link TransportAdapterProvider}s that are specific to particular transport * @param settings settings - * @param transport {@link TcpTransport} instance - * @return if supported, builds the {@link ServerExceptionHandler} instance + * @return a collection of additional {@link TransportAdapterProvider}s */ - Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport); + default Collection> getTransportAdapterProviders(Settings settings) { + return Collections.emptyList(); + } /** - * If supported, builds the {@link SSLEngine} instance for {@link HttpServerTransport} instance + * If supported, builds the {@link TransportExceptionHandler} instance for {@link Transport} instance * @param settings settings - * @param transport {@link HttpServerTransport} instance - * @return if supported, builds the {@link SSLEngine} instance - * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + * @param transport {@link Transport} instance + * @return if supported, builds the {@link TransportExceptionHandler} instance */ - Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException; + Optional buildServerTransportExceptionHandler(Settings settings, Transport transport); /** - * If supported, builds the {@link SSLEngine} instance for {@link TcpTransport} instance + * If supported, builds the {@link SSLEngine} instance for {@link Transport} instance * @param settings settings - * @param transport {@link TcpTransport} instance + * @param transport {@link Transport} instance * @return if supported, builds the {@link SSLEngine} instance * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built */ - Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException; + Optional buildSecureServerTransportEngine(Settings settings, Transport transport) throws SSLException; /** * If supported, builds the {@link SSLEngine} instance for client transport instance diff --git a/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java b/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java new file mode 100644 index 0000000000000..a6b935a6b97bc --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * An exception handler for errors that might happen while the secure transport handles requests. + * + * @see SslExceptionHandler + * + * @opensearch.experimental + */ +@ExperimentalApi +@FunctionalInterface +public interface TransportExceptionHandler { + static TransportExceptionHandler NOOP = t -> {}; + + /** + * Handler for errors happening during the server side processing of the requests + * @param t the error + */ + void onError(Throwable t); +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java index 5c180346c05e1..7ad0715a2a38e 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java @@ -67,7 +67,8 @@ private void applyForTransportLayer(String actionName, AdmissionControlActionTyp throw new OpenSearchRejectedExecutionException( String.format( Locale.ROOT, - "CPU usage admission controller rejected the request for action [%s] as CPU limit reached", + "CPU usage admission controller rejected the request for action [%s] as CPU limit reached for action-type [%s]", + actionName, admissionControlActionType.name() ) ); @@ -112,6 +113,8 @@ private long getCpuRejectionThreshold(AdmissionControlActionType admissionContro return this.settings.getSearchCPULimit(); case INDEXING: return this.settings.getIndexingCPULimit(); + case CLUSTER_ADMIN: + return this.settings.getClusterAdminCPULimit(); default: throw new IllegalArgumentException( String.format( diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java index ad6cc3ff378f0..d03b2050cd5f3 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java @@ -68,7 +68,8 @@ private void applyForTransportLayer(String actionName, AdmissionControlActionTyp throw new OpenSearchRejectedExecutionException( String.format( Locale.ROOT, - "Io usage admission controller rejected the request for action [%s] as IO limit reached", + "IO usage admission controller rejected the request for action [%s] as IO limit reached for action-type [%s]", + actionName, admissionControlActionType.name() ) ); @@ -113,6 +114,8 @@ private long getIoRejectionThreshold(AdmissionControlActionType admissionControl return this.settings.getSearchIOUsageLimit(); case INDEXING: return this.settings.getIndexingIOUsageLimit(); + case CLUSTER_ADMIN: + return this.settings.getClusterAdminIOUsageLimit(); default: throw new IllegalArgumentException( String.format( diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java index 8cf6e973ceb64..6acc440180281 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java +++
b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java @@ -15,7 +15,8 @@ */ public enum AdmissionControlActionType { INDEXING("indexing"), - SEARCH("search"); + SEARCH("search"), + CLUSTER_ADMIN("cluster_admin"); private final String type; @@ -38,6 +39,8 @@ public static AdmissionControlActionType fromName(String name) { return INDEXING; case "search": return SEARCH; + case "cluster_admin": + return CLUSTER_ADMIN; default: throw new IllegalArgumentException("Not Supported TransportAction Type: " + name); } diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java index 1bddd1446a4c4..30012176d59af 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java @@ -30,6 +30,8 @@ public static class Defaults { private AdmissionControlMode transportLayerMode; private Long searchCPULimit; private Long indexingCPULimit; + private Long clusterInfoCPULimit; + /** * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set * rejection will be performed, otherwise only rejection metrics will be populated. @@ -62,14 +64,24 @@ public static class Defaults { Setting.Property.NodeScope ); + public static final Setting CLUSTER_ADMIN_CPU_USAGE_LIMIT = Setting.longSetting( + "admission_control.cluster.admin.cpu_usage.limit", + Defaults.CPU_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + // currently limited to one setting will add further more settings in follow-up PR's public CpuBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { this.transportLayerMode = CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); clusterSettings.addSettingsUpdateConsumer(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); this.searchCPULimit = SEARCH_CPU_USAGE_LIMIT.get(settings); this.indexingCPULimit = INDEXING_CPU_USAGE_LIMIT.get(settings); + this.clusterInfoCPULimit = CLUSTER_ADMIN_CPU_USAGE_LIMIT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDEXING_CPU_USAGE_LIMIT, this::setIndexingCPULimit); clusterSettings.addSettingsUpdateConsumer(SEARCH_CPU_USAGE_LIMIT, this::setSearchCPULimit); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ADMIN_CPU_USAGE_LIMIT, this::setClusterInfoCPULimit); + } private void setTransportLayerMode(AdmissionControlMode admissionControlMode) { @@ -88,6 +100,10 @@ public Long getIndexingCPULimit() { return indexingCPULimit; } + public Long getClusterAdminCPULimit() { + return clusterInfoCPULimit; + } + public void setIndexingCPULimit(Long indexingCPULimit) { this.indexingCPULimit = indexingCPULimit; } @@ -95,4 +111,9 @@ public void setIndexingCPULimit(Long indexingCPULimit) { public void setSearchCPULimit(Long searchCPULimit) { this.searchCPULimit = searchCPULimit; } + + public void setClusterInfoCPULimit(Long clusterInfoCPULimit) { + this.clusterInfoCPULimit = clusterInfoCPULimit; + } + } diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java 
b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java index e58ed28d21605..e442906ea77d7 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java @@ -25,11 +25,14 @@ public class IoBasedAdmissionControllerSettings { */ public static class Defaults { public static final long IO_USAGE_LIMIT = 95; + public static final long CLUSTER_ADMIN_IO_USAGE_LIMIT = 100; + } private AdmissionControlMode transportLayerMode; private Long searchIOUsageLimit; private Long indexingIOUsageLimit; + private Long clusterAdminIOUsageLimit; /** * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set @@ -63,11 +66,22 @@ public static class Defaults { Setting.Property.NodeScope ); + /** + * This setting is used to set the limit for cluster admin requests; by default it uses the default cluster_admin IO usage limit + */ + public static final Setting CLUSTER_ADMIN_IO_USAGE_LIMIT = Setting.longSetting( + "admission_control.cluster_admin.io_usage.limit", + Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT, + Setting.Property.Final, + Setting.Property.NodeScope + ); + public IoBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { this.transportLayerMode = IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); clusterSettings.addSettingsUpdateConsumer(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); this.searchIOUsageLimit = SEARCH_IO_USAGE_LIMIT.get(settings); this.indexingIOUsageLimit = INDEXING_IO_USAGE_LIMIT.get(settings); + this.clusterAdminIOUsageLimit = CLUSTER_ADMIN_IO_USAGE_LIMIT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDEXING_IO_USAGE_LIMIT, this::setIndexingIOUsageLimit); clusterSettings.addSettingsUpdateConsumer(SEARCH_IO_USAGE_LIMIT, this::setSearchIOUsageLimit); } @@ -95,4 +109,8 @@ public Long getIndexingIOUsageLimit() { return indexingIOUsageLimit; } public Long getSearchIOUsageLimit() { return searchIOUsageLimit; } + + public Long getClusterAdminIOUsageLimit() { + return clusterAdminIOUsageLimit; + } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 076173177feee..5aab02993db34 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -108,6 +108,8 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -669,7 +671,9 @@ public void cloneRemoteStoreIndexShardSnapshot( RemoteStoreLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepository, indexUUID, - String.valueOf(shardId.shardId()) + String.valueOf(shardId.shardId()), + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be
obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManger.cloneLock( FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(), @@ -1107,7 +1111,8 @@ public static void remoteDirectoryCleanupAsync( String remoteStoreRepoForIndex, String indexUUID, ShardId shardId, - String threadPoolName + String threadPoolName, + RemoteStorePathStrategy pathStrategy ) { threadpool.executor(threadPoolName) .execute( @@ -1116,7 +1121,8 @@ public static void remoteDirectoryCleanupAsync( remoteDirectoryFactory, remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + pathStrategy ), indexUUID, shardId @@ -1147,7 +1153,9 @@ protected void releaseRemoteStoreLockAndCleanup( RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); @@ -1169,7 +1177,9 @@ protected void releaseRemoteStoreLockAndCleanup( remoteStoreRepoForIndex, indexUUID, new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), - ThreadPool.Names.REMOTE_PURGE + ThreadPool.Names.REMOTE_PURGE, + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); } } diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index e18a594236fc8..3552e32022b2c 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -40,6 +40,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; @@ -73,6 +74,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class BaseRestHandler implements RestHandler { public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting( @@ -195,8 +197,11 @@ protected final String unrecognized( /** * REST requests are handled by preparing a channel consumer that represents the execution of * the request against a channel. 
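// Illustrative sketch (not part of this diff): the channel-consumer pattern the javadoc above
// describes, in a trivial handler. The handler name and route are invented for the example.
import java.io.IOException;
import java.util.List;

import org.opensearch.client.node.NodeClient;
import org.opensearch.core.rest.RestStatus;
import org.opensearch.rest.BaseRestHandler;
import org.opensearch.rest.BytesRestResponse;
import org.opensearch.rest.RestRequest;

public class ExamplePingHandler extends BaseRestHandler {
    @Override
    public String getName() {
        return "example_ping_handler";
    }

    @Override
    public List<Route> routes() {
        return List.of(new Route(RestRequest.Method.GET, "/_example/ping"));
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
        // Nothing executes here; the returned consumer runs later, against the channel.
        return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.OK, "pong"));
    }
}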
+ * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") protected interface RestChannelConsumer extends CheckedConsumer {} /** diff --git a/server/src/main/java/org/opensearch/rest/NamedRoute.java b/server/src/main/java/org/opensearch/rest/NamedRoute.java index 109f688a4924e..c2b3ea5fdeaaf 100644 --- a/server/src/main/java/org/opensearch/rest/NamedRoute.java +++ b/server/src/main/java/org/opensearch/rest/NamedRoute.java @@ -9,6 +9,7 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.transport.TransportService; import java.util.HashSet; @@ -20,8 +21,9 @@ /** * A named Route * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class NamedRoute extends RestHandler.Route { private static final String VALID_ACTION_NAME_PATTERN = "^[a-zA-Z0-9:/*_]*$"; diff --git a/server/src/main/java/org/opensearch/rest/RestChannel.java b/server/src/main/java/org/opensearch/rest/RestChannel.java index b8ce3e92e0098..b3ded1389f754 100644 --- a/server/src/main/java/org/opensearch/rest/RestChannel.java +++ b/server/src/main/java/org/opensearch/rest/RestChannel.java @@ -33,6 +33,7 @@ package org.opensearch.rest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,6 +45,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface RestChannel { XContentBuilder newBuilder() throws IOException; diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 294dc3ffbe329..877afdd951088 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -33,6 +33,7 @@ package org.opensearch.rest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.XContent; import org.opensearch.rest.RestRequest.Method; @@ -46,6 +47,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface RestHandler { @@ -180,8 +182,9 @@ public boolean allowSystemIndexAccessByDefault() { /** * Route for the request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class Route { protected final String path; @@ -231,7 +234,10 @@ public boolean equals(Object o) { /** * Represents an API that has been deprecated and is slated for removal. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") class DeprecatedRoute extends Route { private final String deprecationMessage; @@ -249,7 +255,10 @@ public String getDeprecationMessage() { /** * Represents an API that has had its {@code path} or {@code method} changed. Holds both the * new and previous {@code path} and {@code method} combination. 
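// Illustrative sketch (not part of this diff): a ReplacedRoute keeps a deprecated path serving
// traffic alongside its replacement. Both paths here are invented for the example.
import java.util.List;

import org.opensearch.rest.RestHandler;
import org.opensearch.rest.RestRequest;

public class ExampleRoutes {
    public static List<RestHandler.ReplacedRoute> replacedRoutes() {
        return List.of(
            // new method + path first, deprecated method + path second
            new RestHandler.ReplacedRoute(RestRequest.Method.GET, "/_example/stats", RestRequest.Method.GET, "/_example/legacy_stats")
        );
    }
}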
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ReplacedRoute extends Route { private final String deprecatedPath; diff --git a/server/src/main/java/org/opensearch/rest/RestResponse.java b/server/src/main/java/org/opensearch/rest/RestResponse.java index 2eff746e8508c..482eb6b052e9b 100644 --- a/server/src/main/java/org/opensearch/rest/RestResponse.java +++ b/server/src/main/java/org/opensearch/rest/RestResponse.java @@ -33,6 +33,7 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; @@ -49,6 +50,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RestResponse { private Map> customHeaders; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 4cd10c6874e0a..4413c8eb370be 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -253,6 +253,10 @@ protected Table getTableWithHeader(final RestRequest request) { "search.point_in_time_total", "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" ); + table.addCell( + "search.search_idle_reactivate_count_total", + "alias:ssirct,searchSearchIdleReactivateCountTotal;default:false;text-align:right;desc:number of times a shard reactivated" + ); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -427,6 +431,7 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getSearchIdleReactivateCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); diff --git a/server/src/main/java/org/opensearch/script/DerivedFieldScript.java b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java new file mode 100644 index 0000000000000..7f5b991950ec6 --- /dev/null +++ b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.script; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +/** + * Definition of Script for DerivedField. + * It will be used to execute scripts defined against derived fields of any type + * + * @opensearch.internal + */ +public abstract class DerivedFieldScript { + + public static final String[] PARAMETERS = {}; + public static final ScriptContext CONTEXT = new ScriptContext<>("derived_field", Factory.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); + + private static final Map> PARAMS_FUNCTIONS = Map.of( + "doc", + value -> value, + "_source", + value -> ((SourceLookup) value).loadSourceIfNeeded() + ); + + /** + * The generic runtime parameters for the script. + */ + private final Map params; + + /** + * A leaf lookup for the bound segment this script will operate on. + */ + private final LeafSearchLookup leafLookup; + + public DerivedFieldScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { + Map parameters = new HashMap<>(params); + this.leafLookup = lookup.getLeafSearchLookup(leafContext); + parameters.putAll(leafLookup.asMap()); + this.params = new DynamicMap(parameters, PARAMS_FUNCTIONS); + } + + protected DerivedFieldScript() { + params = null; + leafLookup = null; + } + + /** + * Return the parameters for this script. + */ + public Map getParams() { + return params; + } + + /** + * The doc lookup for the Lucene segment this script was created for. + */ + public Map> getDoc() { + return leafLookup.doc(); + } + + /** + * Set the current document to run the script on next. + */ + public void setDocument(int docid) { + leafLookup.setDocument(docid); + } + + public abstract Object execute(); + + /** + * A factory to construct {@link DerivedFieldScript} instances. + * + * @opensearch.internal + */ + public interface LeafFactory { + DerivedFieldScript newInstance(LeafReaderContext ctx) throws IOException; + } + + /** + * A factory to construct stateful {@link DerivedFieldScript} factories for a specific index. 
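// Illustrative sketch (not part of this diff): a hand-written DerivedFieldScript.Factory of the
// kind a script engine would normally compile. The source field name and the upper-casing logic
// are assumptions made for the example.
import java.util.Locale;
import java.util.Map;

import org.opensearch.index.fielddata.ScriptDocValues;
import org.opensearch.script.DerivedFieldScript;
import org.opensearch.search.lookup.SearchLookup;

public class UppercaseDerivedFieldFactory implements DerivedFieldScript.Factory {
    @Override
    public DerivedFieldScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup) {
        return ctx -> new DerivedFieldScript(params, lookup, ctx) {
            @Override
            public Object execute() {
                // getDoc() exposes the doc values for whatever document setDocument(docid) selected
                ScriptDocValues<?> values = getDoc().get("host.keyword");
                return values == null || values.isEmpty() ? null : values.get(0).toString().toUpperCase(Locale.ROOT);
            }
        };
    }
}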
+ * + * @opensearch.internal + */ + public interface Factory extends ScriptFactory { + LeafFactory newFactory(Map params, SearchLookup lookup); + } +} diff --git a/server/src/main/java/org/opensearch/script/ScriptModule.java b/server/src/main/java/org/opensearch/script/ScriptModule.java index a192e9553016b..c83a6e64d53eb 100644 --- a/server/src/main/java/org/opensearch/script/ScriptModule.java +++ b/server/src/main/java/org/opensearch/script/ScriptModule.java @@ -78,7 +78,8 @@ public class ScriptModule { ScriptedMetricAggContexts.MapScript.CONTEXT, ScriptedMetricAggContexts.CombineScript.CONTEXT, ScriptedMetricAggContexts.ReduceScript.CONTEXT, - IntervalFilterScript.CONTEXT + IntervalFilterScript.CONTEXT, + DerivedFieldScript.CONTEXT ).collect(Collectors.toMap(c -> c.name, Function.identity())); } diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 061aa2f6e5896..c76ea71c0a094 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -962,6 +962,12 @@ public BucketCollectorProcessor bucketCollectorProcessor() { * false: otherwise */ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { + // Do not use concurrent segment search for system indices or throttled requests. See: + // https://github.com/opensearch-project/OpenSearch/issues/12951 + if (indexShard.isSystem() || indexShard.indexSettings().isSearchThrottled()) { + return false; + } + if ((clusterService != null) && (concurrentSearchExecutor != null)) { return indexService.getIndexSettings() .getSettings() @@ -969,9 +975,8 @@ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearc IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) ); - } else { - return false; } + return false; } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java index 0bb2d1d7ca933..94e9ce5063277 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java @@ -15,9 +15,9 @@ import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Objects; /** * Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global @@ -56,17 +56,9 @@ public String getCollectorReason() { @Override public ReduceableSearchResult reduce(Collection collectors) throws IOException { - final List aggregators = context.bucketCollectorProcessor().toAggregators(collectors); - final List internals = new ArrayList<>(aggregators.size()); + final List internals = context.bucketCollectorProcessor().toInternalAggregations(collectors); + assert internals.stream().noneMatch(Objects::isNull); context.aggregations().resetBucketMultiConsumer(); - for (Aggregator aggregator : aggregators) { - try { - // post collection is called in ContextIndexSearcher after search on leaves are completed - internals.add(aggregator.buildTopLevel()); - } catch (IOException e) { - throw new 
AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); - } - } final InternalAggregations internalAggregations = InternalAggregations.from(internals); return buildAggregationResult(internalAggregations); diff --git a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java index 8744d1f6a07d3..f4db8f61bf537 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java @@ -33,6 +33,7 @@ package org.opensearch.search.aggregations; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.SetOnce; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.core.ParseField; @@ -61,6 +62,8 @@ @PublicApi(since = "1.0.0") public abstract class Aggregator extends BucketCollector implements Releasable { + private final SetOnce internalAggregation = new SetOnce<>(); + /** * Parses the aggregation request and creates the appropriate aggregator factory for it. * @@ -83,6 +86,13 @@ public interface Parser { AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException; } + /** + * Returns the InternalAggregation stored during post collection + */ + public InternalAggregation getPostCollectionAggregation() { + return internalAggregation.get(); + } + /** * Return the name of this aggregator. */ @@ -185,13 +195,15 @@ public interface BucketComparator { /** * Build the result of this aggregation if it is at the "top level" - * of the aggregation tree. If, instead, it is a sub-aggregation of - * another aggregation then the aggregation that contains it will call - * {@link #buildAggregations(long[])}. + * of the aggregation tree and save it. This should get called + * during post collection. If, instead, it is a sub-aggregation + * of another aggregation then the aggregation that contains + * it will call {@link #buildAggregations(long[])}. */ public final InternalAggregation buildTopLevel() throws IOException { assert parent() == null; - return buildAggregations(new long[] { 0 })[0]; + this.internalAggregation.set(buildAggregations(new long[] { 0 })[0]); + return internalAggregation.get(); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java index 135fda71a757a..df05ce3f5c049 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java @@ -72,6 +72,15 @@ public void processPostCollection(Collector collectorTree) throws IOException { } } else if (currentCollector instanceof BucketCollector) { ((BucketCollector) currentCollector).postCollection(); + + // Perform build aggregation during post collection + if (currentCollector instanceof Aggregator) { + ((Aggregator) currentCollector).buildTopLevel(); + } else if (currentCollector instanceof MultiBucketCollector) { + for (Collector innerCollector : ((MultiBucketCollector) currentCollector).getCollectors()) { + collectors.offer(innerCollector); + } + } } } } @@ -106,4 +115,31 @@ public List toAggregators(Collection collectors) { } return aggregators; } + + /** + * Unwraps the input collection of {@link Collector} to get the list of the {@link InternalAggregation}. 
The + * input is expected to contain the collectors related to Aggregations only as that is passed to {@link AggregationCollectorManager} + * during the reduce phase. This list of {@link InternalAggregation} is used to optionally perform reduce at shard level before + * returning response to coordinator + * @param collectors collection of aggregation collectors to reduce + * @return list of unwrapped {@link InternalAggregation} + */ + public List toInternalAggregations(Collection collectors) throws IOException { + List internalAggregations = new ArrayList<>(); + + final Deque allCollectors = new LinkedList<>(collectors); + while (!allCollectors.isEmpty()) { + Collector currentCollector = allCollectors.pop(); + if (currentCollector instanceof InternalProfileCollector) { + currentCollector = ((InternalProfileCollector) currentCollector).getCollector(); + } + + if (currentCollector instanceof Aggregator) { + internalAggregations.add(((Aggregator) currentCollector).getPostCollectionAggregation()); + } else if (currentCollector instanceof MultiBucketCollector) { + allCollectors.addAll(Arrays.asList(((MultiBucketCollector) currentCollector).getCollectors())); + } + } + return internalAggregations; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java index 8814cc3c435e1..db2a11b47fbc3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java @@ -12,8 +12,10 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Objects; @@ -42,6 +44,19 @@ public Collector newCollector() throws IOException { } } + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in + // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore + // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree. + if (context.searcher().getLeafContexts().isEmpty()) { + for (Collector c : collectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } + } + return super.reduce(collectors); + } + @Override protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { // Reduce the aggregations across slices before sending to the coordinator. 
We will perform shard level reduce as long as any slices diff --git a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java index 1461dd3009b44..35186422fceaa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java @@ -131,12 +131,10 @@ public static class MultiBucketConsumer implements IntConsumer { private final int limit; private final CircuitBreaker breaker; - // aggregations execute in a single thread for both sequential - // and concurrent search, so no atomic here + // count is currently only updated in final reduce phase which is executed in single thread for both concurrent and non-concurrent + // search private int count; - - // will be updated by multiple threads in concurrent search - // hence making it as LongAdder + // will be updated by multiple threads in concurrent search hence making it as LongAdder private final LongAdder callCount; private volatile boolean circuitBreakerTripped; private final int availProcessors; diff --git a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java index 8b0a1530b5505..5408a19c8ca50 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java @@ -12,8 +12,10 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Objects; @@ -42,6 +44,19 @@ public Collector newCollector() throws IOException { } } + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in + // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore + // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree. + if (context.searcher().getLeafContexts().isEmpty()) { + for (Collector c : collectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } + } + return super.reduce(collectors); + } + @Override protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { // Reduce the aggregations across slices before sending to the coordinator. 
We will perform shard level reduce as long as any slices diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 4af14ab014db5..6c5619a843fae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -40,6 +40,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Arrays; import java.util.Map; /** @@ -80,7 +81,7 @@ protected Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details - return false; + // Disable concurrent search if any scripting is used. See https://github.com/opensearch-project/OpenSearch/issues/12331 for details + return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 69fda2f3f6133..9e40f7b4c9b3e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -45,6 +45,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; +import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongArray; @@ -94,8 +95,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final long valueCount; private final String fieldName; private Weight weight; - private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; + private final SetOnce dvs = new SetOnce<>(); protected int segmentsWithSingleValuedOrds = 0; protected int segmentsWithMultiValuedOrds = 0; @@ -129,11 +130,10 @@ public GlobalOrdinalsStringTermsAggregator( this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. this.valuesSource = valuesSource; final IndexReader reader = context.searcher().getIndexReader(); - final SortedSetDocValues values = reader.leaves().size() > 0 + final SortedSetDocValues values = !reader.leaves().isEmpty() ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) : DocValues.emptySortedSet(); this.valueCount = values.getValueCount(); - this.lookupGlobalOrd = values::lookupOrd; this.acceptedGlobalOrdinals = includeExclude == null ? 
ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get; if (remapGlobalOrds) { this.collectionStrategy = new RemapGlobalOrds(cardinality); @@ -885,7 +885,10 @@ PriorityQueue buildPriorityQueue(int size) { } StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException { - BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd)); + // Recreate DocValues as needed for concurrent segment search + SortedSetDocValues values = getDocValues(); + BytesRef term = BytesRef.deepCopyOf(values.lookupOrd(temp.globalOrd)); + StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); result.bucketOrd = temp.bucketOrd; result.docCountError = 0; @@ -1001,7 +1004,9 @@ BucketUpdater bucketUpdater(long owningBucketOrd) long subsetSize = subsetSize(owningBucketOrd); return (spare, globalOrd, bucketOrd, docCount) -> { spare.bucketOrd = bucketOrd; - oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); + // Recreate DocValues as needed for concurrent segment search + SortedSetDocValues values = getDocValues(); + oversizedCopy(values.lookupOrd(globalOrd), spare.termBytes); spare.subsetDf = docCount; spare.subsetSize = subsetSize; spare.supersetDf = backgroundFrequencies.freq(spare.termBytes); @@ -1086,4 +1091,18 @@ private void oversizedCopy(BytesRef from, BytesRef to) { * Predicate used for {@link #acceptedGlobalOrdinals} if there is no filter. */ private static final LongPredicate ALWAYS_TRUE = l -> true; + + /** + * If DocValues have not been initialized yet for reduce phase, create and set them. + */ + private SortedSetDocValues getDocValues() throws IOException { + if (dvs.get() == null) { + dvs.set( + !context.searcher().getIndexReader().leaves().isEmpty() + ? 
valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) + : DocValues.emptySortedSet() + ); + } + return dvs.get(); + } } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 1a5a9dc6d1f03..bf7045d43ba67 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -1842,7 +1842,7 @@ public String toString() { public String toString(Params params) { try { return XContentHelper.toXContent(this, MediaTypeRegistry.JSON, params, true).utf8ToString(); - } catch (IOException e) { + } catch (IOException | UnsupportedOperationException e) { throw new OpenSearchException(e); } } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 3d13378e58e5d..8f6001dc06755 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -113,6 +113,12 @@ public List toAggregators(Collection collectors) { // should not be called when there is no aggregation collector throw new IllegalStateException("Unexpected toAggregators call on NO_OP_BUCKET_COLLECTOR_PROCESSOR"); } + + @Override + public List toInternalAggregations(Collection collectors) { + // should not be called when there is no aggregation collector + throw new IllegalStateException("Unexpected toInternalAggregations call on NO_OP_BUCKET_COLLECTOR_PROCESSOR"); + } }; private final List releasables = new CopyOnWriteArrayList<>(); diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index bf2c7fc74be92..ff393ecf19a99 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -216,7 +216,6 @@ public RestoreService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. 
restoreSnapshotTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.RESTORE_SNAPSHOT_KEY, true); - } /** @@ -452,6 +451,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); + createIndexService.addRemoteStorePathStrategyInCustomData(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 1c25d8c71f948..89f1ea142336e 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -381,7 +381,7 @@ private void snapshot( if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } - if (indexShard.indexSettings().isSegRepEnabled() && indexShard.isPrimaryMode() == false) { + if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.isPrimaryMode() == false) { throw new IndexShardSnapshotFailedException( shardId, "snapshot triggered on a new primary following failover and cannot proceed until promotion is complete" diff --git a/server/src/main/java/org/opensearch/tasks/TaskListener.java b/server/src/main/java/org/opensearch/tasks/TaskListener.java index 97df8eacee584..7ec08984a7f07 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskListener.java +++ b/server/src/main/java/org/opensearch/tasks/TaskListener.java @@ -32,11 +32,14 @@ package org.opensearch.tasks; +import org.opensearch.common.annotation.PublicApi; + /** * Listener for Task success or failure. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TaskListener<Response> { /** * Handle task response. This response may constitute a failure or a success diff --git a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java index b9764c0c53f4a..931707e4a1cdc 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -49,8 +50,9 @@ * A connection profile describes how many connections are established to a specific node for each of the available request types * ({@link org.opensearch.transport.TransportRequestOptions.Type}). This allows a connection to be tailored towards a specific usage. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ConnectionProfile { /**
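Since ConnectionProfile is now part of the public API, a short sketch of assembling a profile through its builder may help; the channel counts and timeouts below are illustrative values, not defaults:

ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
builder.addConnections(1, TransportRequestOptions.Type.PING); // dedicated ping channel
builder.addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.BULK,
    TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY); // shared channels for the remaining types
builder.setConnectTimeout(TimeValue.timeValueSeconds(30));
builder.setHandshakeTimeout(TimeValue.timeValueSeconds(30));
ConnectionProfile profile = builder.build(); // build() rejects profiles that leave a request type uncovered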
diff --git a/server/src/main/java/org/opensearch/transport/Header.java b/server/src/main/java/org/opensearch/transport/Header.java index a179cfb35288e..57c1da6f46aec 100644 --- a/server/src/main/java/org/opensearch/transport/Header.java +++ b/server/src/main/java/org/opensearch/transport/Header.java @@ -33,6 +33,7 @@ package org.opensearch.transport; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +48,9 @@ /** * Transport Header * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Header { private static final String RESPONSE_NAME = "NO_ACTION_NAME_FOR_RESPONSES"; diff --git a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java index a1ed682ff7d7f..71c4d6973505d 100644 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/InboundMessage.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -43,8 +44,9 @@ /** * Inbound data as a message * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class InboundMessage implements Releasable { private final Header header; diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java index 87786fb22f22e..dc729053c2148 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java @@ -40,6 +40,7 @@ import org.opensearch.client.Client; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -73,8 +74,9 @@ /** * Basic service for accessing remote clusters via gateway nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RemoteClusterService extends RemoteClusterAware implements Closeable { private final Logger logger = LogManager.getLogger(RemoteClusterService.class); diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index 280dd958358fd..b5f356490d3d4 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * This class encapsulates all remote cluster information to be rendered on * {@code _remote/info} requests.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RemoteConnectionInfo implements ToXContentFragment, Writeable { final ModeInfo modeInfo; @@ -132,8 +134,9 @@ public int hashCode() { /** * Mode information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface ModeInfo extends ToXContentFragment, Writeable { boolean isConnected(); diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java index f0b37c617725e..f2c159d1380e8 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java @@ -38,6 +38,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -75,7 +76,13 @@ */ public abstract class RemoteConnectionStrategy implements TransportConnectionListener, Closeable { - enum ConnectionStrategy { + /** + * Strategy to connect to remote nodes + * + * @opensearch.api + */ + @PublicApi(since = "1.0.0") + public enum ConnectionStrategy { SNIFF( SniffConnectionStrategy.CHANNELS_PER_CONNECTION, SniffConnectionStrategy::enablementSettings, diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index 98c182c562928..4368dbdece6cf 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.concurrent.ThreadContext; @@ -47,8 +48,9 @@ /** * Registry for OpenSearch RequestHandlers * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RequestHandlerRegistry { private final String action; diff --git a/server/src/main/java/org/opensearch/transport/StatsTracker.java b/server/src/main/java/org/opensearch/transport/StatsTracker.java index 5548d2d558ae2..02bc0a51c9330 100644 --- a/server/src/main/java/org/opensearch/transport/StatsTracker.java +++ b/server/src/main/java/org/opensearch/transport/StatsTracker.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.MeanMetric; import java.util.concurrent.atomic.LongAdder; @@ -39,8 +40,9 @@ /** * Tracks transport statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class StatsTracker { private final LongAdder bytesRead = new LongAdder(); diff --git a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java index f98b65d0a4df1..7d4515de85d80 100644 --- a/server/src/main/java/org/opensearch/transport/TcpChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; @@ -45,8 +46,9 @@ * abstraction used by the {@link TcpTransport} and {@link TransportService}. All tcp transport * implementations must return channels that adhere to the required method contracts. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TcpChannel extends CloseableChannel { /** @@ -114,8 +116,9 @@ default Optional get(String name, Class clazz) { /** * Channel statistics * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ChannelStats { private volatile long lastAccessedTime; diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index d0e6516973382..7d45152089f37 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.metrics.MeanMetric; @@ -111,8 +112,9 @@ /** * The TCP Transport layer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { private static final Logger logger = LogManager.getLogger(TcpTransport.class); @@ -966,7 +968,10 @@ public static Set getProfileSettings(Settings settings) { /** * Representation of a transport profile settings for a {@code transport.profiles.$profilename.*} + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ProfileSettings { public final String profileName; public final boolean tcpNoDelay; diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java index 8abedff37db14..b89393615c95f 100644 --- a/server/src/main/java/org/opensearch/transport/Transport.java +++ b/server/src/main/java/org/opensearch/transport/Transport.java @@ -58,8 +58,9 @@ /** * OpenSearch Transport Interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Transport extends LifecycleComponent { /** @@ -166,7 +167,10 @@ default Object getCacheKey() { /** * This class represents a response context that encapsulates the actual response handler, the action and the connection it was * executed on. 
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") final class ResponseContext<T extends TransportResponse> { private final TransportResponseHandler<T> handler; @@ -196,7 +200,10 @@ public String action() { /** * This class is a registry that allows tracking the response handlers of in-flight requests by request id + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") final class ResponseHandlers { private final ConcurrentMapLong<ResponseContext<? extends TransportResponse>> handlers = ConcurrentCollections .newConcurrentMapLongWithAggressiveConcurrency(); @@ -276,8 +283,9 @@ public TransportResponseHandler<? extends TransportResponse> onResponseReceived( /** * Request handler implementations * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") final class RequestHandlers { private volatile Map<String, RequestHandlerRegistry<? extends TransportRequest>> requestHandlers = Collections.emptyMap(); diff --git a/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java new file mode 100644 index 0000000000000..36dbd5a699b40 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; + +import java.util.Optional; + +/** + * Transport-specific adapter providers which can be injected into the transport processing chain. The transport adapters + * are transport-specific and do not have any common abstraction on top. + * @param <T> transport type + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TransportAdapterProvider<T> { + /** + * The name of this transport adapter provider (names are essentially freeform). + * @return the name of this transport adapter provider + */ + String name(); + + /** + * Provides a new transport adapter of the required transport adapter class and transport instance.
+ * @param <C> transport adapter class + * @param settings settings + * @param transport HTTP transport instance + * @param adapterClass required transport adapter class + * @return the non-empty {@link Optional} if the transport adapter could be created, an empty one otherwise + */ + <C> Optional<C> create(Settings settings, T transport, Class<C> adapterClass); +} diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index f84ee5dc745c3..7b6715ff2c73d 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.transport.TransportResponse; import java.io.IOException; @@ -46,6 +47,7 @@ * * @opensearch.internal */ +@PublicApi(since = "1.0.0") public interface TransportChannel { Logger logger = LogManager.getLogger(TransportChannel.class); diff --git a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java index dfcd7acce3706..284c4646655c5 100644 --- a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java +++ b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java @@ -32,13 +32,15 @@ package org.opensearch.transport; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.transport.TransportResponse; /** * Listens for transport messages * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TransportMessageListener { TransportMessageListener NOOP_LISTENER = new TransportMessageListener() { diff --git a/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java b/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java index 54ee1b68fc9aa..0419c0b82029b 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java @@ -32,13 +32,15 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.tasks.Task; /** * Handles transport requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TransportRequestHandler<T extends TransportRequest> { void messageReceived(T request, TransportChannel channel, Task task) throws Exception; diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 8992af18edb48..748d2a4d867ec 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.transport.TransportResponse; @@ -42,8 +43,9 @@ /** * Handles transport responses * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TransportResponseHandler<T extends TransportResponse> extends Writeable.Reader<T> { void handleResponse(T response);
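With TransportResponseHandler promoted to public API, a minimal implementation sketch may be useful; the concrete response type and executor below are illustrative choices, not requirements of the interface:

TransportResponseHandler<ClusterStateResponse> handler = new TransportResponseHandler<ClusterStateResponse>() {
    @Override
    public ClusterStateResponse read(StreamInput in) throws IOException {
        return new ClusterStateResponse(in); // deserialize the response off the wire
    }

    @Override
    public void handleResponse(ClusterStateResponse response) {
        // consume the successful response
    }

    @Override
    public void handleException(TransportException exp) {
        // propagate or log the failure
    }

    @Override
    public String executor() {
        return ThreadPool.Names.SAME; // run the handler on the calling transport thread
    }
};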
diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 652d57f4c5348..d08b28730d417 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -1214,7 +1214,11 @@ public void registerRequestHandler( TransportRequestHandler<Request> handler ) { validateActionName(action); - handler = interceptor.interceptHandler(action, executor, forceExecution, handler, admissionControlActionType); + if (admissionControlActionType != null) { + handler = interceptor.interceptHandler(action, executor, forceExecution, handler, admissionControlActionType); + } else { + handler = interceptor.interceptHandler(action, executor, forceExecution, handler); + } RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>( action, requestReader, diff --git a/server/src/main/java/org/opensearch/transport/TransportStats.java b/server/src/main/java/org/opensearch/transport/TransportStats.java index e3c4773f4a472..01980ce529caa 100644 --- a/server/src/main/java/org/opensearch/transport/TransportStats.java +++ b/server/src/main/java/org/opensearch/transport/TransportStats.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Stats for transport activity * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TransportStats implements Writeable, ToXContentFragment { private final long serverOpen; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index ef26bc225b0c7..5ca5f53f180be 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -82,6 +82,7 @@ import org.mockito.ArgumentCaptor; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.arrayWithSize; @@ -332,15 +333,15 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { TransportVerifyShardBeforeCloseAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportVerifyShardBeforeCloseAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); - assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); +
when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } private TransportVerifyShardBeforeCloseAction createAction() { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java index 09215088bd04b..c9d3a6c4c7605 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportShardFlushActionTests extends OpenSearchTestCase { public void testGetReplicationModeWithRemoteTranslog() { TransportShardFlushAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardFlushAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java index 8c4a6c023f9a5..90498d6d35700 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportVerifyShardIndexBlockActionTests extends OpenSearchTestCas public void testGetReplicationModeWithRemoteTranslog() { TransportVerifyShardIndexBlockAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportVerifyShardIndexBlockAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git 
a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java index b2eee904bad38..bc0b7e5cf14b2 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportShardRefreshActionTests extends OpenSearchTestCase { public void testGetReplicationModeWithRemoteTranslog() { TransportShardRefreshAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardRefreshAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index 65b555649b2d0..6331861c3dcb9 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -107,6 +107,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.not; @@ -1237,14 +1238,14 @@ public void testHandlePrimaryTermValidationRequestSuccess() { public void testGetReplicationModeWithRemoteTranslog() { TransportShardBulkAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.PRIMARY_TERM_VALIDATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardBulkAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index da87a0a967f53..a2fefd6278321 100644 --- 
a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -84,6 +84,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; @@ -233,14 +234,14 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { final TransportResyncReplicationAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final TransportResyncReplicationAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index f025e3a63b9bf..9ee314e77ca7e 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -39,6 +39,8 @@ import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.Strings; import org.opensearch.core.tasks.TaskId; +import org.opensearch.geometry.LinearRing; +import org.opensearch.index.query.GeoShapeQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.AbstractSearchTestCase; import org.opensearch.search.Scroll; @@ -269,6 +271,19 @@ public void testDescriptionIncludesScroll() { ); } + public void testDescriptionOnSourceError() { + LinearRing linearRing = new LinearRing(new double[] { -25, -35, -25 }, new double[] { -25, -35, -25 }); + GeoShapeQueryBuilder queryBuilder = new GeoShapeQueryBuilder("geo", linearRing); + SearchRequest request = new SearchRequest(); + request.source(new SearchSourceBuilder().query(queryBuilder)); + assertThat( + toDescription(request), + equalTo( + "indices[], search_type[QUERY_THEN_FETCH], source[]" + ) + ); + } + private String toDescription(SearchRequest request) { return request.createTask(0, "test", SearchAction.NAME, TaskId.EMPTY_TASK_ID, emptyMap()).getDescription(); } diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 9ae1310a8b15c..538416e1137f5 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -6,24 +6,6 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ /* * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java new file mode 100644 index 0000000000000..6d118cbccb42d --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java @@ -0,0 +1,320 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.clustermanager; + +import org.opensearch.Version; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.ThreadedActionListener; +import org.opensearch.action.support.clustermanager.term.GetTermVersionResponse; +import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.equalTo; + +public class TransportClusterManagerTermCheckTests extends OpenSearchTestCase { + private static ThreadPool threadPool; + + private ClusterService clusterService; + private TransportService transportService; + private CapturingTransport transport; + private DiscoveryNode localNode; + private DiscoveryNode remoteNode; + private DiscoveryNode[] allNodes; + + @BeforeClass + public static void beforeClass() { + threadPool = new TestThreadPool("TransportMasterNodeActionTests"); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new CapturingTransport(); + clusterService = createClusterService(threadPool); + transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> clusterService.localNode(), + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ); + transportService.start(); + transportService.acceptIncomingRequests(); + + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + public static class Request extends ClusterManagerNodeRequest { + Request() {} + + Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return 
null; + } + } + + class Response extends ActionResponse { + private long identity = randomLong(); + + Response() {} + + Response(StreamInput in) throws IOException { + super(in); + identity = in.readLong(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return identity == response.identity; + } + + @Override + public int hashCode() { + return Objects.hash(identity); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(identity); + } + } + + class Action extends TransportClusterManagerNodeAction { + Action(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + super( + actionName, + transportService, + clusterService, + threadPool, + new ActionFilters(new HashSet<>()), + Request::new, + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)) + ); + } + + @Override + protected void doExecute(Task task, final Request request, ActionListener listener) { + // remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER + super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener, false)); + } + + @Override + protected String executor() { + // very lightweight operation in memory, no need to fork to a thread + return ThreadPool.Names.SAME; + } + + @Override + protected boolean localExecuteSupportedByAction() { + return true; + } + + @Override + protected Response read(StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + listener.onResponse(new Response()); // default implementation, overridden in specific tests + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return null; // default implementation, overridden in specific tests + } + } + + public void testTermCheckMatchWithClusterManager() throws ExecutionException, InterruptedException { + setUpCluster(Version.CURRENT); + + TransportClusterManagerTermCheckTests.Request request = new TransportClusterManagerTermCheckTests.Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + new TransportClusterManagerTermCheckTests.Action("internal:testAction", transportService, clusterService, threadPool).execute( + request, + listener + ); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; + assertTrue(capturedRequest.node.isClusterManagerNode()); + assertThat(capturedRequest.action, equalTo("internal:monitor/term")); + GetTermVersionResponse response = new GetTermVersionResponse( + new ClusterStateTermVersion( + clusterService.state().getClusterName(), + clusterService.state().metadata().clusterUUID(), + clusterService.state().term(), + clusterService.state().version() + ) + ); + transport.handleResponse(capturedRequest.requestId, response); + assertTrue(listener.isDone()); + } + + public void testTermCheckNoMatchWithClusterManager() throws ExecutionException, InterruptedException { + setUpCluster(Version.CURRENT); + TransportClusterManagerTermCheckTests.Request request = new TransportClusterManagerTermCheckTests.Request(); + + PlainActionFuture listener = new PlainActionFuture<>(); + new 
TransportClusterManagerTermCheckTests.Action("internal:testAction", transportService, clusterService, threadPool).execute( + request, + listener + ); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest termCheckRequest = transport.capturedRequests()[0]; + assertTrue(termCheckRequest.node.isClusterManagerNode()); + assertThat(termCheckRequest.action, equalTo("internal:monitor/term")); + GetTermVersionResponse noMatchResponse = new GetTermVersionResponse( + new ClusterStateTermVersion( + clusterService.state().getClusterName(), + clusterService.state().metadata().clusterUUID(), + clusterService.state().term(), + clusterService.state().version() - 1 + ) + ); + transport.handleResponse(termCheckRequest.requestId, noMatchResponse); + assertFalse(listener.isDone()); + + assertThat(transport.capturedRequests().length, equalTo(2)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[1]; + assertTrue(capturedRequest.node.isClusterManagerNode()); + assertThat(capturedRequest.request, equalTo(request)); + assertThat(capturedRequest.action, equalTo("internal:testAction")); + + TransportClusterManagerTermCheckTests.Response response = new TransportClusterManagerTermCheckTests.Response(); + transport.handleResponse(capturedRequest.requestId, response); + assertTrue(listener.isDone()); + assertThat(listener.get(), equalTo(response)); + + } + + public void testTermCheckOnOldVersionClusterManager() throws ExecutionException, InterruptedException { + + setUpCluster(Version.V_2_12_0); + TransportClusterManagerTermCheckTests.Request request = new TransportClusterManagerTermCheckTests.Request(); + + PlainActionFuture listener = new PlainActionFuture<>(); + new TransportClusterManagerTermCheckTests.Action("internal:testAction", transportService, clusterService, threadPool).execute( + request, + listener + ); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; + assertTrue(capturedRequest.node.isClusterManagerNode()); + assertThat(capturedRequest.request, equalTo(request)); + assertThat(capturedRequest.action, equalTo("internal:testAction")); + + TransportClusterManagerTermCheckTests.Response response = new TransportClusterManagerTermCheckTests.Response(); + transport.handleResponse(capturedRequest.requestId, response); + assertTrue(listener.isDone()); + assertThat(listener.get(), equalTo(response)); + + } + + private void setUpCluster(Version clusterManagerVersion) { + localNode = new DiscoveryNode( + "local_node", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + remoteNode = new DiscoveryNode( + "remote_node", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + clusterManagerVersion + ); + allNodes = new DiscoveryNode[] { localNode, remoteNode }; + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); + + } +} diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionIT.java b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionIT.java new file mode 100644 index 0000000000000..7b783e025a575 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionIT.java @@ -0,0 +1,121 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.clustermanager.term; + +import org.opensearch.action.admin.cluster.state.ClusterStateAction; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.is; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ClusterTermVersionIT extends OpenSearchIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(MockTransportService.TestPlugin.class); + } + + public void testClusterStateResponseFromDataNode() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + ensureClusterSizeConsistency(); + ensureGreen(); + + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.waitForTimeout(TimeValue.timeValueHours(1)); + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(clusterStateRequest).get(); + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + assertThat(stateResponse.isWaitForTimedOut(), is(false)); + + } + + public void testClusterStateResponseFromClusterManagerNode() throws Exception { + String master = internalCluster().startClusterManagerOnlyNode(); + String data = internalCluster().startDataOnlyNode(); + ensureClusterSizeConsistency(); + ensureGreen(); + Map<String, AtomicInteger> callCounters = Map.ofEntries( + Map.entry(ClusterStateAction.NAME, new AtomicInteger()), + Map.entry(GetTermVersionAction.NAME, new AtomicInteger()) + ); + + addCallCountInterceptor(master, callCounters); + + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(new ClusterStateRequest()).get(); + + AtomicInteger clusterStateCallsOnMaster = callCounters.get(ClusterStateAction.NAME); + AtomicInteger termCallsOnMaster = callCounters.get(GetTermVersionAction.NAME); + + assertThat(clusterStateCallsOnMaster.get(), is(0)); + assertThat(termCallsOnMaster.get(), is(1)); + + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + + } + + public void testDatanodeOutOfSync() throws Exception { + String master = internalCluster().startClusterManagerOnlyNode(); + String data = internalCluster().startDataOnlyNode(); + ensureClusterSizeConsistency(); + ensureGreen(); + Map<String, AtomicInteger> callCounters = Map.ofEntries( + Map.entry(ClusterStateAction.NAME, new AtomicInteger()), + Map.entry(GetTermVersionAction.NAME, new AtomicInteger()) + ); + + stubClusterTermResponse(master);
addCallCountInterceptor(master, callCounters); + + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(new ClusterStateRequest()).get(); + + AtomicInteger clusterStateCallsOnMaster = callCounters.get(ClusterStateAction.NAME); + AtomicInteger termCallsOnMaster = callCounters.get(GetTermVersionAction.NAME); + + assertThat(clusterStateCallsOnMaster.get(), is(1)); + assertThat(termCallsOnMaster.get(), is(1)); + + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + } + + private void addCallCountInterceptor(String nodeName, Map<String, AtomicInteger> callCounters) { + MockTransportService primaryService = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeName); + for (var ctrEntry : callCounters.entrySet()) { + primaryService.addRequestHandlingBehavior(ctrEntry.getKey(), (handler, request, channel, task) -> { + ctrEntry.getValue().incrementAndGet(); + logger.info("--> {} response redirect", ctrEntry.getKey()); + handler.messageReceived(request, channel, task); + }); + } + } + + private void stubClusterTermResponse(String master) { + MockTransportService primaryService = (MockTransportService) internalCluster().getInstance(TransportService.class, master); + primaryService.addRequestHandlingBehavior(GetTermVersionAction.NAME, (handler, request, channel, task) -> { + channel.sendResponse(new GetTermVersionResponse(new ClusterStateTermVersion(new ClusterName("test"), "1", -1, -1))); + }); + } + +} diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionTests.java new file mode 100644 index 0000000000000..23ae8c6a58776 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionTests.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.clustermanager.term; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.concurrent.ExecutionException; + +public class ClusterTermVersionTests extends OpenSearchSingleNodeTestCase { + + public void testTransportTermResponse() throws ExecutionException, InterruptedException { + GetTermVersionRequest request = new GetTermVersionRequest(); + GetTermVersionResponse resp = client().execute(GetTermVersionAction.INSTANCE, request).get(); + + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + + assertTrue(resp.matches(clusterService.state())); + } +}
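The tests above pin down the term-version optimization: a node first asks the cluster-manager for just the cluster state's term and version (the internal:monitor/term action asserted earlier) and fetches the full cluster state only on a mismatch. A condensed sketch of that decision, built only from calls that appear in these tests:

GetTermVersionResponse remote = client().execute(GetTermVersionAction.INSTANCE, new GetTermVersionRequest()).get();
if (remote.matches(clusterService.state())) {
    // local state matches the cluster-manager's term and version; serve the request locally
} else {
    // out of sync: fall back to a full ClusterStateAction round-trip
}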
diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java new file mode 100644 index 0000000000000..626c2f74f09c4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java @@ -0,0 +1,216 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShardTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +public class ReplicationModeAwareProxyTests extends OpenSearchTestCase { + + /* + Replication action running on the same primary copy from which it originates. + Action should not run and proxy should return ReplicationMode.NO_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingCurrentPrimary() { + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + null, + true, + ShardRoutingState.STARTED, + AllocationId.newInitializing("abc") + ); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + null, + true, + ShardRoutingState.STARTED, + AllocationId.newInitializing("abc") + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder().add(IndexShardTestUtils.getFakeRemoteEnabledNode("dummy-node")).build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + randomBoolean() + ); + assertEquals(ReplicationMode.NO_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from failing primary to replica being promoted to primary + Action should run and proxy should return ReplicationMode.FULL_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingRelocatingPrimary() { + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId relocationTargetId = AllocationId.newTargetRelocation(primaryId); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + null, + true, + ShardRoutingState.INITIALIZING, + relocationTargetId + ); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + "dummy-node-2", + true, + ShardRoutingState.RELOCATING, + primaryId + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + randomBoolean() + ); + assertEquals(ReplicationMode.FULL_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to docrep replica during remote store migration + Action should run and
proxy should return ReplicationMode.FULL_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingDocrepShard() { + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + true, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + false, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeDiscoNode(targetRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(ReplicationMode.FULL_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to remote replica during remote store migration + Action should not run and proxy should return ReplicationMode.NO_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingRemoteShard() { + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(ReplicationMode.NO_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to remote enabled replica during remote store migration + with an explicit replication mode specified + Action should run and proxy should return the overridden Replication Mode + */ + public void testDetermineReplicationWithExplicitOverrideTargetRoutingRemoteShard() { + ReplicationMode replicationModeOverride = ReplicationMode.PRIMARY_TERM_VALIDATION; + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + replicationModeOverride, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(replicationModeOverride, 
replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary with remote enabled index settings enabled + Action should not query the DiscoveryNodes object + */ + public void testDetermineReplicationWithRemoteIndexSettingsEnabled() { + DiscoveryNodes mockDiscoveryNodes = mock(DiscoveryNodes.class); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + mockDiscoveryNodes, + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + true + ); + replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting); + // Verify no interactions with the DiscoveryNodes object + verify(mockDiscoveryNodes, never()).get(anyString()); + } +} diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 6b54623b03164..ec5fc1d19e40d 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; @@ -59,6 +60,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardNotStartedException; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.node.NodeClosedException; import org.opensearch.test.OpenSearchTestCase; @@ -239,7 +241,13 @@ public void testReplicationWithRemoteTranslogEnabled() throws Exception { listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildRemoteStoreEnabledDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + true + ) ); op.execute(); assertTrue("request was not processed on primary", request.processedOnPrimary.get()); @@ -304,7 +312,13 @@ public void testPrimaryToPrimaryReplicationWithRemoteTranslogEnabled() throws Ex listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildRemoteStoreEnabledDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + true + ) ); op.execute(); assertTrue("request was not processed on primary", request.processedOnPrimary.get()); @@ -380,6 +394,144 @@ public void testForceReplicationWithRemoteTranslogEnabled() throws Exception { 
assertEquals(activeIds.size() + initializingIds.size(), shardInfo.getTotal()); } + public void testReplicationInDualModeWithDocrepReplica() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId); + builder.addShard(routing); + }); + activeIds.stream().filter(aId -> !aId.equals(primaryId)).forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId); + builder.addShard(routing); + }); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + false + ) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + // During dual replication, except for primary, replication action should be executed on all the replicas + assertEquals(activeIds.size() - 1, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + // All initializing and active shards are set to docrep + assertEquals(initializingIds.size() + activeIds.size(), shardInfo.getTotal()); + } + + public void testReplicationInDualModeWithMixedReplicasSomeInDocrepOthersOnRemote() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, 
randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId); + builder.addShard(routing); + }); + activeIds.stream().filter(aId -> !aId.equals(primaryId)).forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId); + builder.addShard(routing); + }); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + // Generate data nodes in mixed mode, wherein some of the allocated replicas + // are on docrep nodes whereas others are on remote enabled ones + Tuple discoveryNodesDetails = buildMixedModeDiscoveryNodes(routingTable); + int docRepNodes = discoveryNodesDetails.v1(); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, discoveryNodesDetails.v2(), replicasProxy, replicasProxy, false) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + // The request should be fanned out only to the replicas on docrep nodes + assertEquals(docRepNodes, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + // Listener should be invoked for the initializing ids, the primary, and the operations on docrep nodes + assertEquals(1 + docRepNodes + initializingIds.size(), shardInfo.getTotal()); + } + static String nodeIdFromAllocationId(final AllocationId allocationId) { return "n-" + allocationId.getId().substring(0, 8); } @@ -816,6 +968,46 @@ private Set getExpectedReplicas(ShardId shardId, ClusterState stat return expectedReplicas; } + private DiscoveryNodes buildRemoteStoreEnabledDiscoveryNodes(IndexShardRoutingTable routingTable) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting shardRouting : routingTable)
{ + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } + return builder.build(); + } + + private DiscoveryNodes buildDiscoveryNodes(IndexShardRoutingTable routingTable) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting shardRouting : routingTable) { + if (shardRouting.primary()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(shardRouting.currentNodeId())); + } + } + return builder.build(); + } + + private Tuple buildMixedModeDiscoveryNodes(IndexShardRoutingTable routingTable) { + int docrepNodes = 0; + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting shardRouting : routingTable) { + if (shardRouting.primary()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } else { + // Only add docrep nodes for allocationIds that are active + // since the test cases create the replication group with active allocationIds only + if (shardRouting.active() && randomBoolean()) { + builder.add(IndexShardTestUtils.getFakeDiscoNode(shardRouting.currentNodeId())); + docrepNodes += 1; + } else { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } + } + } + return new Tuple<>(docrepNodes, builder.build()); + } + public static class Request extends ReplicationRequest { public AtomicBoolean processedOnPrimary = new AtomicBoolean(); public AtomicBoolean runPostReplicationActionsOnPrimary = new AtomicBoolean(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index dad0fa0efd3ec..4a18778cc0b2b 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -78,6 +78,7 @@ import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; +import org.opensearch.index.remote.RemoteStoreTestsHelper; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.IndexShardState; @@ -1589,9 +1590,15 @@ private IndexService mockIndexService(final IndexMetadata indexMetadata, Cluster @SuppressWarnings("unchecked") private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { + return mockIndexShard(shardId, clusterService, false); + } + + @SuppressWarnings("unchecked") + private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService, boolean remote) { final IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); when(indexShard.state()).thenReturn(IndexShardState.STARTED); + when(indexShard.indexSettings()).thenReturn(RemoteStoreTestsHelper.createIndexSettings(remote)); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; if (isPrimaryMode.get()) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cf4de32890a2a..a2f19b8c694d0 100644 ---
a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -71,7 +71,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; @@ -1587,32 +1588,38 @@ public void testBuildIndexMetadata() { */ public void testRemoteCustomData() { // Case 1 - Remote store is not enabled - IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(RemoteStorePathType.values())); + IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(PathType.values())); assertNull(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); // Case 2 - cluster.remote_store.index.path.prefix.optimised=fixed (default value) - indexMetadata = testRemoteCustomData(true, RemoteStorePathType.FIXED); + indexMetadata = testRemoteCustomData(true, PathType.FIXED); + validateRemoteCustomData(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathType.NAME, PathType.FIXED.name()); validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), - RemoteStorePathType.NAME, - RemoteStorePathType.FIXED.toString() + PathHashAlgorithm.NAME, + PathHashAlgorithm.FNV_1A.name() ); // Case 3 - cluster.remote_store.index.path.prefix.optimised=hashed_prefix - indexMetadata = testRemoteCustomData(true, RemoteStorePathType.HASHED_PREFIX); + indexMetadata = testRemoteCustomData(true, PathType.HASHED_PREFIX); validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), - RemoteStorePathType.NAME, - RemoteStorePathType.HASHED_PREFIX.toString() + PathType.NAME, + PathType.HASHED_PREFIX.toString() + ); + validateRemoteCustomData( + indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), + PathHashAlgorithm.NAME, + PathHashAlgorithm.FNV_1A.name() ); } - private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, RemoteStorePathType remoteStorePathType) { + private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType pathType) { Settings.Builder settingsBuilder = Settings.builder(); if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), remoteStorePathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index 62dce9c4edeb5..11cbe89645657 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -72,6 +72,7 @@ import java.util.concurrent.TimeUnit; 
import java.util.stream.Collectors; +import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -140,10 +141,14 @@ public void testIndexBalance() { } private Settings.Builder getSettingsBuilderForPrimaryBalance() { - return getSettingsBuilderForPrimaryBalance(true); + return getSettingsBuilderForPrimaryBalance(true, false); } - private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance) { + private Settings.Builder getSettingsBuilderForPrimaryReBalance() { + return getSettingsBuilderForPrimaryBalance(true, true); + } + + private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance, boolean preferPrimaryRebalance) { final float indexBalance = 0.55f; final float shardBalance = 0.45f; final float balanceThreshold = 1.0f; @@ -155,6 +160,7 @@ private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrima ); settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance); + settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); return settings; @@ -201,7 +207,7 @@ public void testPrimaryBalanceWithoutPreferPrimaryBalanceSetting() { int balanceFailed = 0; AllocationService strategy = createAllocationService( - getSettingsBuilderForPrimaryBalance(false).build(), + getSettingsBuilderForPrimaryBalance(false, false).build(), new TestGatewayAllocator() ); for (int i = 0; i < numberOfRuns; i++) { @@ -244,6 +250,60 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() { assertTrue(balanceFailed <= 1); } + /** + * This test verifies that primary shard balance is not attained after a node drop when only the + * PREFER_PRIMARY_SHARD_BALANCE setting is enabled. + */ + public void testPrimaryBalanceNotSolvedForNodeDropWithPreferPrimaryBalanceSetting() { + final int numberOfNodes = 4; + final int numberOfIndices = 4; + final int numberOfShards = 4; + final int numberOfReplicas = 1; + final int numberOfRuns = 5; + final float buffer = 0.10f; + int balanceFailed = 0; + + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); + for (int i = 0; i < numberOfRuns; i++) { + ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas); + clusterState = removeOneNode(clusterState, strategy); + logger.info(ShardAllocations.printShardDistribution(clusterState)); + try { + verifyPrimaryBalance(clusterState, buffer); + } catch (AssertionError | Exception e) { + balanceFailed++; + logger.info("Expected assertion failure"); + } + } + assertTrue(balanceFailed >= 4); + }
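For intuition on the buffer parameter introduced here: verifyPrimaryBalance (added further below) asserts primaryCount <= avgPrimaryShardsPerNode * (1 + buffer) per node. With, say, 30 active primaries on 3 nodes, avgPrimaryShardsPerNode = ceil(30 / 3) = 10, so under buffer = 0.10 a node holding 11 primaries still passes (11 <= 11.0), whereas the old unbuffered check (primaryCount <= avgPrimaryShardsPerNode) would have failed it. The buffer simply lets a mildly skewed distribution still count as balanced.

+ + /** + * This test verifies primary shard balance is attained with the PREFER_PRIMARY_SHARD_REBALANCE setting.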
+ */ + public void testPrimaryBalanceSolvedWithPreferPrimaryRebalanceSetting() { + final int numberOfNodes = 4; + final int numberOfIndices = 4; + final int numberOfShards = 4; + final int numberOfReplicas = 1; + final int numberOfRuns = 5; + final float buffer = 0.10f; + int balanceFailed = 0; + + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryReBalance().build(), new TestGatewayAllocator()); + for (int i = 0; i < numberOfRuns; i++) { + ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas); + clusterState = removeOneNode(clusterState, strategy); + logger.info(ShardAllocations.printShardDistribution(clusterState)); + try { + verifyPrimaryBalance(clusterState, buffer); + } catch (AssertionError | Exception e) { + balanceFailed++; + logger.info("Unexpected assertion failure"); + } + } + assertTrue(balanceFailed <= 1); + } + /** * This test verifies the allocation logic when nodes breach multiple constraints and ensure node breaching min * constraints chosen for allocation. @@ -367,8 +427,7 @@ public void testPrimaryBalanceWithContrainstBreaching() { */ public void testGlobalPrimaryBalance() throws Exception { AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); - ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); clusterState = addNode(clusterState, strategy); clusterState = addNode(clusterState, strategy); @@ -377,7 +436,30 @@ public void testGlobalPrimaryBalance() throws Exception { clusterState = addIndex(clusterState, strategy, "test-index3", 1, 1); logger.info(ShardAllocations.printShardDistribution(clusterState)); - verifyPrimaryBalance(clusterState); + verifyPrimaryBalance(clusterState, 0.0f); + } + + /** + * This test verifies global primary balance by creating indices iteratively and verifies that primary shards do not pile up on one node. + * @throws Exception generic exception + */ + public void testGlobalPrimaryBalanceWithNodeDrops() throws Exception { + final float buffer = 0.10f; + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryReBalance().build(), new TestGatewayAllocator()); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); + clusterState = addNodes(clusterState, strategy, 5); + + clusterState = addIndices(clusterState, strategy, 5, 1, 8); + + logger.info(ShardAllocations.printShardDistribution(clusterState)); + verifyPrimaryBalance(clusterState, buffer); + + clusterState = removeOneNode(clusterState, strategy); + + clusterState = applyAllocationUntilNoChange(clusterState, strategy); + + logger.info(ShardAllocations.printShardDistribution(clusterState)); + verifyPrimaryBalance(clusterState, buffer); }
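Taken together, the two tests above isolate the effect of the new rebalance flag. A minimal sketch of what getSettingsBuilderForPrimaryReBalance() effectively enables, using the constants that appear in this diff (balance factors and threshold omitted):

    Settings rebalanceSettings = Settings.builder()
        // spread primaries across nodes when initially allocating shards
        .put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), true)
        // additionally allow rebalancing to move primaries, validated against the buffered check
        .put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE.getKey(), true)
        .build();

/** @@ -537,7 +619,7 @@ private void verifyPerIndexPrimaryBalance(ClusterState currentState) { } } - private void verifyPrimaryBalance(ClusterState clusterState) throws Exception { + private void verifySkewedPrimaryBalance(ClusterState clusterState, int delta) throws Exception { assertBusy(() -> { RoutingNodes nodes = clusterState.getRoutingNodes(); int totalPrimaryShards = 0; @@ -545,13 +627,36 @@ private void verifyPrimaryBalance(ClusterState clusterState) throws Exception { totalPrimaryShards += index.primaryShardsActive(); } final int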
avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size()); + int maxPrimaryShardOnNode = Integer.MIN_VALUE; + int minPrimaryShardOnNode = Integer.MAX_VALUE; for (RoutingNode node : nodes) { final int primaryCount = node.shardsWithState(STARTED) .stream() .filter(ShardRouting::primary) .collect(Collectors.toList()) .size(); - assertTrue(primaryCount <= avgPrimaryShardsPerNode); + maxPrimaryShardOnNode = Math.max(maxPrimaryShardOnNode, primaryCount); + minPrimaryShardOnNode = Math.min(minPrimaryShardOnNode, primaryCount); + } + assertTrue(maxPrimaryShardOnNode - minPrimaryShardOnNode < delta); + }, 60, TimeUnit.SECONDS); + } + + private void verifyPrimaryBalance(ClusterState clusterState, float buffer) throws Exception { + assertBusy(() -> { + RoutingNodes nodes = clusterState.getRoutingNodes(); + int totalPrimaryShards = 0; + for (final IndexRoutingTable index : clusterState.getRoutingTable().indicesRouting().values()) { + totalPrimaryShards += index.primaryShardsActive(); + } + final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size()); + for (RoutingNode node : nodes) { + final int primaryCount = node.shardsWithState(STARTED) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .size(); + assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer))); } }, 60, TimeUnit.SECONDS); } @@ -567,8 +672,8 @@ public void testShardBalance() { ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString() ); - settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); AllocationService strategy = createAllocationService(settings.build(), new TestGatewayAllocator()); @@ -634,6 +739,34 @@ private ClusterState addIndex( return applyAllocationUntilNoChange(clusterState, strategy); } + private ClusterState addIndices( + ClusterState clusterState, + AllocationService strategy, + int numberOfShards, + int numberOfReplicas, + int numberOfIndices + ) { + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.getMetadata()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + + for (int i = 0; i < numberOfIndices; i++) { + IndexMetadata.Builder index = IndexMetadata.builder("test" + i) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas); + + metadataBuilder = metadataBuilder.put(index); + routingTableBuilder.addAsNew(index.build()); + } + + clusterState = ClusterState.builder(clusterState) + .metadata(metadataBuilder.build()) + .routingTable(routingTableBuilder.build()) + .build(); + clusterState = strategy.reroute(clusterState, "indices-created"); + return applyAllocationUntilNoChange(clusterState, strategy); + } + private ClusterState initCluster( AllocationService strategy, int numberOfIndices, @@ -664,7 +797,7 @@ private ClusterState initCluster( for (int i = 0; i < numberOfNodes; i++) { nodes.add(newNode("node" + i)); } - ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + 
ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(nodes) .metadata(metadata) .routingTable(initialRoutingTable) @@ -673,6 +806,17 @@ private ClusterState initCluster( return applyAllocationUntilNoChange(clusterState, strategy); } + private ClusterState addNodes(ClusterState clusterState, AllocationService strategy, int numberOfNodes) { + logger.info("now, start [{}] more nodes, check that rebalancing will happen because we set it to always", numberOfNodes); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + for (int i = 0; i < numberOfNodes; i++) { + nodes.add(newNode("node" + (clusterState.nodes().getSize() + i))); + } + clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + return applyStartedShardsUntilNoChange(clusterState, strategy); + } + private ClusterState addNode(ClusterState clusterState, AllocationService strategy) { logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always"); clusterState = ClusterState.builder(clusterState) @@ -918,7 +1062,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing nodes.add(node); } - ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(nodes) .metadata(metadata) .routingTable(routingTable) diff --git a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java index 070fde523fe87..b0916ce9236f7 100644 --- a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java @@ -218,7 +218,8 @@ public void testTasksAreExecutedInOrder() throws BrokenBarrierException, Interru executors[i] = new TaskExecutor(); } - int tasksSubmittedPerThread = randomIntBetween(2, 1024); + // Otherwise this could create up to 8192 threads and cause a native memory OOM, so we limit the number of tasks submitted per thread. + int tasksSubmittedPerThread = randomIntBetween(2, 128); CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); @@ -286,7 +287,7 @@ public void testNoTasksAreDroppedInParallelSubmission() throws BrokenBarrierExce executors[i] = new TaskExecutor(); } - int tasksSubmittedPerThread = randomIntBetween(2, 1024); + int tasksSubmittedPerThread = randomIntBetween(2, 128); CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
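The new FNV1aTests below pin the hash implementation to known values. For reference, a standard 64-bit FNV-1a looks like the sketch here; the class under test is org.opensearch.common.hash.FNV1a, whose exact byte handling may differ, and the class name in the sketch is illustrative. Note also that the expected hash32 values in the test exceed 32 bits, which suggests the 32-bit variant accumulates into a long without truncating.

    import java.nio.charset.StandardCharsets;

    final class Fnv1a64Sketch {
        private static final long OFFSET_BASIS = 0xcbf29ce484222325L; // standard FNV-1a 64-bit offset basis
        private static final long PRIME = 0x100000001b3L;             // standard FNV-1a 64-bit prime

        // XOR in each byte, then multiply by the prime; this standard form reproduces
        // the well-known vector for "hello world" asserted in the test below.
        static long hash64(String input) {
            long hash = OFFSET_BASIS;
            for (byte b : input.getBytes(StandardCharsets.UTF_8)) {
                hash ^= (b & 0xff);
                hash *= PRIME;
            }
            return hash;
        }
    }

diff --git a/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java b/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java new file mode 100644 index 0000000000000..8d41211f10134 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.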
+ */ + +package org.opensearch.common.hashing; + +import org.opensearch.common.hash.FNV1a; +import org.opensearch.test.OpenSearchTestCase; + +public class FNV1aTests extends OpenSearchTestCase { + + public void testHash32WithKnownValues() { + assertEquals(-1114940029532279145L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentsdata")); + assertEquals(-5313793557685118702L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentsmetadata")); + assertEquals(-4776941653547780179L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0translogdata")); + assertEquals(7773876801598345624L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0translogmetadata")); + assertEquals(3174284101845744576L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentslock_files")); + assertEquals(875447599647258598L, FNV1a.hash32("hell")); + assertEquals(1460560186469985451L, FNV1a.hash32("hello")); + assertEquals(-4959477702557352110L, FNV1a.hash32("hello w")); + assertEquals(-777130915070571257L, FNV1a.hash32("hello wo")); + assertEquals(-7887204531510399185L, FNV1a.hash32("hello wor")); + assertEquals(-782004333700192647L, FNV1a.hash32("hello worl")); + assertEquals(2168278929747165095L, FNV1a.hash32("hello world")); + assertEquals(2655121221658607504L, FNV1a.hash32("The quick brown fox jumps over the lazy dog")); + } + + public void testHash64WithKnownValues() { + assertEquals(-8975854101357662761L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentsdata")); + assertEquals(-4380291990281602606L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentsmetadata")); + assertEquals(-4532418109365814419L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0translogdata")); + assertEquals(41331743556869080L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0translogmetadata")); + assertEquals(6170437157231275808L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentslock_files")); + assertEquals(763638091547294502L, FNV1a.hash64("hell")); + assertEquals(-6615550055289275125L, FNV1a.hash64("hello")); + assertEquals(-8428874042178798254L, FNV1a.hash64("hello w")); + assertEquals(-6323438951910650201L, FNV1a.hash64("hello wo")); + assertEquals(7042426588567368687L, FNV1a.hash64("hello wor")); + assertEquals(7273314957493782425L, FNV1a.hash64("hello worl")); + assertEquals(8618312879776256743L, FNV1a.hash64("hello world")); + assertEquals(-866459186506731248L, FNV1a.hash64("The quick brown fox jumps over the lazy dog")); + } + +} diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index 1c607ca0dc98b..447377e372e61 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -47,13 +47,15 @@ import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TcpTransport; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; @@ -73,38 +75,60 @@ 
import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import static org.hamcrest.CoreMatchers.startsWith; + public class NetworkModuleTests extends OpenSearchTestCase { private ThreadPool threadPool; - private SecureTransportSettingsProvider secureTransportSettingsProvider; + private SecureSettingsFactory secureSettingsFactory; @Override public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(NetworkModuleTests.class.getName()); - secureTransportSettingsProvider = new SecureTransportSettingsProvider() { - @Override - public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { - return Optional.empty(); - } + secureSettingsFactory = new SecureSettingsFactory() { @Override - public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { - return Optional.empty(); + public Optional getSecureTransportSettingsProvider(Settings settings) { + return Optional.of(new SecureTransportSettingsProvider() { + @Override + public Optional buildServerTransportExceptionHandler( + Settings settings, + Transport transport + ) { + return Optional.empty(); + } + + @Override + public Optional buildSecureServerTransportEngine(Settings settings, Transport transport) + throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) + throws SSLException { + return Optional.empty(); + } + }); } @Override - public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { - return Optional.empty(); + public Optional getSecureHttpTransportSettingsProvider(Settings settings) { + return Optional.of(new SecureHttpTransportSettingsProvider() { + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) + throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildHttpServerExceptionHandler( + Settings settings, + HttpServerTransport transport + ) { + return Optional.empty(); + } + }); } }; } @@ -211,7 +235,7 @@ public Map> getSecureTransports( return Collections.singletonMap("custom-secure", custom); } }; - NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), plugin); + NetworkModule module = newNetworkModule(settings, null, List.of(secureSettingsFactory), plugin); assertSame(custom, module.getTransportSupplier()); } @@ -222,7 +246,7 @@ public void testRegisterSecureHttpTransport() { .build(); Supplier custom = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, List.of(secureSettingsFactory), new NetworkPlugin() { @Override public Map> getSecureHttpTransports( Settings settings, @@ -234,7 +258,7 @@ public Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider 
secureTransportSettingsProvider, Tracer tracer ) { return Collections.singletonMap("custom-secure", custom); @@ -595,7 +619,7 @@ private NetworkModule newNetworkModule( private NetworkModule newNetworkModule( Settings settings, List coreTransportInterceptors, - List secureTransportSettingsProviders, + List secureSettingsFactories, NetworkPlugin... plugins ) { return new NetworkModule( @@ -612,7 +636,33 @@ private NetworkModule newNetworkModule( new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), NoopTracer.INSTANCE, coreTransportInterceptors, - secureTransportSettingsProviders + secureSettingsFactories ); } + + public void testRegisterSecureTransportMultipleProviders() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom-secure").build(); + Supplier custom = () -> null; // content doesn't matter, we check reference equality + NetworkPlugin plugin = new NetworkPlugin() { + @Override + public Map> getSecureTransports( + Settings settings, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + return Collections.singletonMap("custom-secure", custom); + } + }; + + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> newNetworkModule(settings, null, List.of(secureSettingsFactory, secureSettingsFactory), plugin) ); + assertThat(ex.getMessage(), startsWith("there is more than one secure transport settings provider")); } }
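The SettingTests changes below replace (int) casts with (long). The reason: in Java, narrowing a double that exceeds Integer.MAX_VALUE to int saturates at Integer.MAX_VALUE (JLS 5.1.3), so percentage-based memory settings were silently capped at 2 GB on large heaps. A minimal illustration, assuming a hypothetical 32 GB heap:

    long heapBytes = 32L * 1024 * 1024 * 1024;  // 34359738368
    long asLong = (long) (heapBytes * 0.3);     // 10307921510 -> correct
    int asInt = (int) (heapBytes * 0.3);        // 2147483647  -> saturated at Integer.MAX_VALUE

diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index c6da96b521276..7ebee680e8e52 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -201,7 +201,7 @@ public void testMemorySize() { assertEquals(new ByteSizeValue(12), value.get()); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "20%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get()); + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get()); } public void testMemorySizeWithFallbackValue() { @@ -219,10 +219,12 @@ public void testMemorySizeWithFallbackValue() { assertEquals(memorySizeValue.getBytes(), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2, 1.0); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "30%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3)), value.get()); + // If value = getHeapMax() * 0.3 is bigger than 2gb, i.e. bigger than Integer.MAX_VALUE, + // then (long)((int) value) loses precision.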
+ assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3)), value.get()); assertTrue(settingUpdater.apply(Settings.builder().put("b.byte.size", "40%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), value.get()); + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), value.get()); } public void testSimpleUpdate() { diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index f175308482b15..88cb3782252b7 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -8,9 +8,14 @@ package org.opensearch.common.util; +import org.opensearch.common.settings.Settings; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; +import static org.opensearch.common.util.FeatureFlags.DATETIME_FORMATTER_CACHING; +import static org.opensearch.common.util.FeatureFlags.EXTENSIONS; +import static org.opensearch.common.util.FeatureFlags.IDENTITY; + public class FeatureFlagTests extends OpenSearchTestCase { private final String FLAG_PREFIX = "opensearch.experimental.feature."; @@ -33,4 +38,33 @@ public void testNonBooleanFeatureFlag() { assertNotNull(System.getProperty(javaVersionProperty)); assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); } + + public void testBooleanFeatureFlagWithDefaultSetToTrue() { + final String testFlag = DATETIME_FORMATTER_CACHING; + assertNotNull(testFlag); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } + + public void testBooleanFeatureFlagWithDefaultSetToFalse() { + final String testFlag = IDENTITY; + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + assertNotNull(testFlag); + assertFalse(FeatureFlags.isEnabled(testFlag)); + } + + public void testBooleanFeatureFlagInitializedWithEmptySettingsAndDefaultSetToTrue() { + final String testFlag = DATETIME_FORMATTER_CACHING; + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + assertNotNull(testFlag); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } + + public void testInitializeFeatureFlagsWithExperimentalSettings() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(IDENTITY, true).build()); + assertTrue(FeatureFlags.isEnabled(IDENTITY)); + assertTrue(FeatureFlags.isEnabled(DATETIME_FORMATTER_CACHING)); + assertFalse(FeatureFlags.isEnabled(EXTENSIONS)); + // reset FeatureFlags to defaults + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } } diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java new file mode 100644 index 0000000000000..464038c93228b --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java @@ -0,0 +1,849 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterInfo; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.seqno.ReplicationTracker; +import org.opensearch.index.seqno.RetentionLease; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; +import org.opensearch.snapshots.SnapshotShardSizeInfo; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Collections.unmodifiableMap; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ReplicaShardBatchAllocatorTests extends OpenSearchAllocationTestCase { + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.opensearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; + private final ShardId shardId = new ShardId("test", "_na_", 0); + private final DiscoveryNode node1 = newNode("node1"); + private final DiscoveryNode node2 = newNode("node2"); + private final DiscoveryNode node3 = newNode("node3"); + + private TestBatchAllocator testBatchAllocator; + + @Before + public void buildTestAllocator() { + this.testBatchAllocator = new TestBatchAllocator(); + } + + private void 
allocateAllUnassignedBatch(final RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List shardToBatch = new ArrayList<>(); + while (iterator.hasNext()) { + shardToBatch.add(iterator.next()); + } + testBatchAllocator.allocateUnassignedBatch(shardToBatch, allocation); + } + + /** + * Verifies that when we are still fetching data in an async manner, the replica shard moves to ignore unassigned. + */ + public void testNoAsyncFetchData() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.clean(); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that on index creation, we don't fetch data for any shards, but keep the replica shard unassigned to let + * the shard allocator allocate it. There isn't a copy around to find anyhow. + */ + public void testAsyncFetchWithNoShardOnIndexCreation() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.EMPTY, + UnassignedInfo.Reason.INDEX_CREATED + ); + testBatchAllocator.clean(); + allocateAllUnassignedBatch(allocation); + assertThat(testBatchAllocator.getFetchDataCalledAndClean(), equalTo(false)); + assertThat(testBatchAllocator.getShardEligibleFetchDataCountAndClean(), equalTo(0)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that for anything but index creation, fetch data ends up being called, since we need to go and try + * and find a better copy for the shard. + */ + public void testAsyncFetchOnAnythingButIndexCreation() { + UnassignedInfo.Reason reason = RandomPicks.randomFrom( + random(), + EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED)) + ); + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, reason); + testBatchAllocator.clean(); + allocateAllUnassignedBatch(allocation); + assertThat("failed with reason " + reason, testBatchAllocator.getFetchDataCalledAndClean(), equalTo(true)); + assertThat("failed with reason " + reason, testBatchAllocator.getShardEligibleFetchDataCountAndClean(), equalTo(1)); + } + + /** + * Verifies that when there is a full match (syncId and files) we allocate it to matching node. + */ + public void testSimpleFullMatchAllocation() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(nodeToMatch.getId()) + ); + }
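A simplified sketch of how the "MATCH"/"NO_MATCH" data in these tests ranks candidate copies; the real ranking lives in ReplicaShardAllocator (shared by the batch variant) and also weighs retention leases, as the later tests show, so treat this method and its parameter names as illustrative:

    static long matchingBytes(String primarySyncId, Map<String, StoreFileMetadata> primaryFiles,
                              String candidateSyncId, Map<String, StoreFileMetadata> candidateFiles) {
        if (primarySyncId != null && primarySyncId.equals(candidateSyncId)) {
            return Long.MAX_VALUE; // a sync id match wins outright, regardless of file checksums
        }
        long bytes = 0;
        for (StoreFileMetadata file : primaryFiles.values()) {
            StoreFileMetadata candidate = candidateFiles.get(file.name());
            if (candidate != null && candidate.isSame(file)) { // same name, length and checksum
                bytes += file.length();
            }
        }
        return bytes; // the copy with the most matching bytes is preferred
    }

+ + /** + * Verifies that when there is a sync id match but no files match, we allocate it to matching node.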
+ */ + public void testSyncIdMatch() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", null, new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(nodeToMatch.getId()) + ); + } + + /** + * Verifies that when there is no sync id match but files match, we allocate it to matching node. + */ + public void testFileChecksumMatch() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "NO_MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(nodeToMatch.getId()) + ); + } + + public void testPreferCopyWithHighestMatchingOperations() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + long retainingSeqNoOnPrimary = randomLongBetween(1, Integer.MAX_VALUE); + long retainingSeqNoForNode2 = randomLongBetween(0, retainingSeqNoOnPrimary - 1); + // Rarely use a seqNo above retainingSeqNoOnPrimary, which could in theory happen when primary fails and comes back quickly. 
+ long retainingSeqNoForNode3 = randomLongBetween(retainingSeqNoForNode2 + 1, retainingSeqNoOnPrimary + 100); + List retentionLeases = Arrays.asList( + newRetentionLease(node1, retainingSeqNoOnPrimary), + newRetentionLease(node2, retainingSeqNoForNode2), + newRetentionLease(node3, retainingSeqNoForNode3) + ); + testBatchAllocator.addData( + node1, + retentionLeases, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "NOT_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + } + + public void testCancelRecoveryIfFoundCopyWithNoopRetentionLease() { + final UnassignedInfo unassignedInfo; + final Set failedNodes; + if (randomBoolean()) { + failedNodes = Collections.emptySet(); + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null); + } else { + failedNodes = new HashSet<>(randomSubsetOf(Arrays.asList("node-4", "node-5", "node-6"))); + unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.ALLOCATION_FAILED, + null, + null, + randomIntBetween(1, 10), + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + failedNodes + ); + } + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo); + long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE); + testBatchAllocator.addData( + node1, + Arrays.asList(newRetentionLease(node1, retainingSeqNo), newRetentionLease(node3, retainingSeqNo)), + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "NO_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + Collection replicaShards = allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED); + List shardRoutingBatch = new ArrayList<>(replicaShards); + List> shardBatchList = Collections.singletonList( + new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING)) + ); + + testBatchAllocator.processExistingRecoveries(allocation, shardBatchList); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + List unassignedShards = allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED); + assertThat(unassignedShards, hasSize(1)); + assertThat(unassignedShards.get(0).shardId(), equalTo(shardId)); + assertThat(unassignedShards.get(0).unassignedInfo().getNumFailedAllocations(), equalTo(0)); + assertThat(unassignedShards.get(0).unassignedInfo().getFailedNodeIds(), equalTo(failedNodes)); + } + + public void testNotCancellingRecoveryIfCurrentRecoveryHasRetentionLease() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + List peerRecoveryRetentionLeasesOnPrimary = new 
ArrayList<>(); + long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE); + peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node1, retainingSeqNo)); + peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node2, randomLongBetween(1, retainingSeqNo))); + if (randomBoolean()) { + peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNo))); + } + testBatchAllocator.addData( + node1, + peerRecoveryRetentionLeasesOnPrimary, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testNotCancelIfPrimaryDoesNotHaveValidRetentionLease() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + testBatchAllocator.addData( + node1, + Collections.singletonList(newRetentionLease(node3, randomNonNegativeLong())), + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "NOT_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + "NOT_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testIgnoreRetentionLeaseIfCopyIsEmpty() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE); + List retentionLeases = new ArrayList<>(); + retentionLeases.add(newRetentionLease(node1, retainingSeqNo)); + retentionLeases.add(newRetentionLease(node2, randomLongBetween(0, retainingSeqNo))); + if (randomBoolean()) { + retentionLeases.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNo))); + } + testBatchAllocator.addData( + node1, + retentionLeases, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData(node2, null, null); // has retention lease but store is empty + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + 
equalTo(node3.getId()) + ); + } + + /** + * When we can't find primary data, but still find replica data, we go ahead and keep it unassigned + * to be allocated. This is today behavior, which relies on a primary corruption identified with + * adding a replica and having that replica actually recover and cause the corruption to be identified + * See CorruptFileTest# + */ + public void testNoPrimaryData() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.addData( + node2, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that when there is primary data, but no data at all on other nodes, the shard keeps + * unassigned to be allocated later on. + */ + public void testNoDataForReplicaOnAnyNode() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.addData( + node1, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that when there is primary data, but no matching data at all on other nodes, the shard keeps + * unassigned to be allocated later on. + */ + public void testNoMatchingFilesForReplicaOnAnyNode() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", null, new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * When there is no decision or throttle decision across all nodes for the shard, make sure the shard + * moves to the ignore unassigned list. + */ + public void testNoOrThrottleDecidersRemainsInUnassigned() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders() + ); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Tests when the node to allocate to due to matching is being throttled, we move the shard to ignored + * to wait till throttling on it is done. 
+ */ + public void testThrottleWhenAllocatingToMatchingNode() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + new AllocationDeciders( + Arrays.asList( + new TestAllocateDecision(Decision.YES), + new SameShardAllocationDecider( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ), + new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (node.node().equals(node2)) { + return Decision.THROTTLE; + } + return Decision.YES; + } + } + ) + ) + ); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + public void testDelayedAllocation() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), + UnassignedInfo.Reason.NODE_LEFT + ); + testBatchAllocator.addData( + node1, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + if (randomBoolean()) { + // we sometime return empty list of files, make sure we test this as well + testBatchAllocator.addData(node2, null, null); + } + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + + allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), + UnassignedInfo.Reason.NODE_LEFT + ); + testBatchAllocator.addData( + node2, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId()) + ); + } + + public void testCancelRecoveryBetterSyncId() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + 
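+ // Editor's note (not part of this patch): routingNodesChanged() returning true here is the signal that processExistingRecoveries cancelled the ongoing recovery onto node2, whose sync id ("NO_MATCH") differs from the primary's, in favour of node3, whose sync id matches. A sketch of the assumed comparison (hypothetical helper name):
+ //
+ //     boolean syncIdMatch(String primarySyncId, String candidateSyncId) {
+ //         return primarySyncId != null && primarySyncId.equals(candidateSyncId);
+ //     }
+ //
+ // The cancelled shard should then reappear as UNASSIGNED, which the next assertions verify.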
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { + final UnassignedInfo unassignedInfo; + if (randomBoolean()) { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null); + } else { + unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.ALLOCATION_FAILED, + null, + null, + randomIntBetween(1, 10), + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.singleton("node-4") + ); + } + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo); + List<RetentionLease> retentionLeases = new ArrayList<>(); + if (randomBoolean()) { + long retainingSeqNoOnPrimary = randomLongBetween(0, Long.MAX_VALUE); + retentionLeases.add(newRetentionLease(node1, retainingSeqNoOnPrimary)); + if (randomBoolean()) { + retentionLeases.add(newRetentionLease(node2, randomLongBetween(0, retainingSeqNoOnPrimary))); + } + if (randomBoolean()) { + retentionLeases.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNoOnPrimary))); + } + } + testBatchAllocator.addData( + node1, + retentionLeases, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testNotCancellingRecovery() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testDoNotCancelForBrokenNode() { + Set<String> failedNodes = new HashSet<>(); + failedNodes.add(node3.getId()); + if (randomBoolean()) { + failedNodes.add("node4"); + } + UnassignedInfo unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.ALLOCATION_FAILED, + null, + null, + randomIntBetween(failedNodes.size(), 10), + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + failedNodes + ); + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo); + long retainingSeqNoOnPrimary = randomLongBetween(0, Long.MAX_VALUE);
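+ // Editor's note (not part of this patch): node3 is given a noop-capable retention lease below, which would normally make it a candidate worth cancelling the current recovery for. This test pins the guard against that: a node recorded in UnassignedInfo#getFailedNodeIds() must not trigger a cancel, however good its copy looks. A minimal sketch of the assumed check, with a hypothetical helper name:
+ //
+ //     boolean canCancelTowards(DiscoveryNode candidate, UnassignedInfo info) {
+ //         return info.getFailedNodeIds().contains(candidate.getId()) == false;
+ //     }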
+ List<RetentionLease> retentionLeases = Arrays.asList( + newRetentionLease(node1, retainingSeqNoOnPrimary), + newRetentionLease(node3, retainingSeqNoOnPrimary) + ); + testBatchAllocator.addData( + node1, + retentionLeases, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ) + .addData(node2, randomSyncId(), null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, randomSyncId(), null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED), empty()); + } + + private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders) { + return onePrimaryOnNode1And1Replica(deciders, Settings.EMPTY, UnassignedInfo.Reason.CLUSTER_RECOVERED); + } + + private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED); + IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardId.getIndexName()) + .settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())); + Metadata metadata = Metadata.builder().put(indexMetadata).build(); + // mark shard as delayed if reason is NODE_LEFT + boolean delayed = reason == UnassignedInfo.Reason.NODE_LEFT + && UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(settings).nanos() > 0; + int failedAllocations = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ?
1 : 0; + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard) + .addShard( + ShardRouting.newUnassigned( + shardId, + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo( + reason, + null, + null, + failedAllocations, + System.nanoTime(), + System.currentTimeMillis(), + delayed, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.emptySet() + ) + ) + ) + .build() + ) + ) + .build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) + .build(); + return new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + } + + private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders, UnassignedInfo unassignedInfo) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(shardId.getIndexName()) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())) + ) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard) + .addShard( + TestShardRouting.newShardRouting( + shardId, + node2.getId(), + null, + false, + ShardRoutingState.INITIALIZING, + unassignedInfo + ) + ) + .build() + ) + ) + .build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) + .build(); + return new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + } + + private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { + return onePrimaryOnNode1And1ReplicaRecovering(deciders, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); + } + + static RetentionLease newRetentionLease(DiscoveryNode node, long retainingSeqNo) { + return new RetentionLease( + ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId()), + retainingSeqNo, + randomNonNegativeLong(), + ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE + ); + } + + static String randomSyncId() { + return randomFrom("MATCH", "NOT_MATCH", null); + } + + class TestBatchAllocator extends ReplicaShardBatchAllocator { + private Map data = null; + private AtomicBoolean fetchDataCalled = new AtomicBoolean(false); + private AtomicInteger eligibleShardFetchDataCount = new AtomicInteger(0); + + public void clean() { + data = null; + } + + public boolean getFetchDataCalledAndClean() { + return fetchDataCalled.getAndSet(false); + } + + public int getShardEligibleFetchDataCountAndClean() { + return eligibleShardFetchDataCount.getAndSet(0); + } + + public TestBatchAllocator addData( + DiscoveryNode node, + String syncId, + @Nullable Exception storeFileFetchException, 
+ StoreFileMetadata... files + ) { + return addData(node, Collections.emptyList(), syncId, storeFileFetchException, files); + } + + public TestBatchAllocator addData( + DiscoveryNode node, + List<RetentionLease> peerRecoveryRetentionLeases, + String syncId, + @Nullable Exception storeFileFetchException, + StoreFileMetadata... files + ) { + if (data == null) { + data = new HashMap<>(); + } + Map<String, StoreFileMetadata> filesAsMap = new HashMap<>(); + for (StoreFileMetadata file : files) { + filesAsMap.put(file.name(), file); + } + Map<String, String> commitData = new HashMap<>(); + if (syncId != null) { + commitData.put(Engine.SYNC_COMMIT_ID, syncId); + } + data.put( + node, + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata( + new TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata( + shardId, + new Store.MetadataSnapshot(unmodifiableMap(filesAsMap), unmodifiableMap(commitData), randomInt()), + peerRecoveryRetentionLeases + ), + storeFileFetchException + ) + ); + return this; + } + + @Override + protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> fetchData( + List<ShardRouting> eligibleShards, + List<ShardRouting> ineligibleShards, + RoutingAllocation allocation + ) { + fetchDataCalled.set(true); + eligibleShardFetchDataCount.set(eligibleShards.size()); + Map<DiscoveryNode, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> tData = null; + if (data != null) { + tData = new HashMap<>(); + for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> entry : data.entrySet()) { + Map<ShardId, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> shardData = Map.of( + shardId, + entry.getValue() + ); + tData.put( + entry.getKey(), + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch(entry.getKey(), shardData) + ); + } + } + return new AsyncShardFetch.FetchResult<>(tData, new HashMap<>() { + { + put(shardId, Collections.emptySet()); + } + }); + } + + @Override + protected boolean hasInitiatedFetching(ShardRouting shard) { + return fetchDataCalled.get(); + } + } +} diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 97bc822be7d51..82d7ab06f126b 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -99,6 +99,7 @@ import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; @@ -261,8 +262,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - DefaultRecoverySettings.INSTANCE + DefaultRecoverySettings.INSTANCE, + DefaultRemoteStoreSettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index e4ce879a5ec5e..474ec73d5fe61 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -1053,4 +1053,15 @@ public void testDefaultSearchPipeline() throws Exception { settings.updateIndexMetadata(metadata); assertEquals("foo", settings.getDefaultSearchPipeline()); } + + public void testIsOnRemoteNode() { + Version version = VersionUtils.getPreviousVersion();
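+ // Editor's note (not part of this patch): remote-store membership is derived from node attributes rather than from index settings; it is the presence of the "node.attr.remote_store.translog.repository" attribute below that should make isAssignedOnRemoteNode() return true. A minimal sketch of the assumed check (hypothetical helper, for illustration only):
+ //
+ //     boolean hasRemoteTranslogRepo(Settings nodeSettings) {
+ //         return nodeSettings.get("node.attr.remote_store.translog.repository") != null;
+ //     }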
+ Settings theSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put(IndexMetadata.SETTING_INDEX_UUID, "0xdeadbeef") + .build(); + Settings nodeSettings = Settings.builder().put("node.attr.remote_store.translog.repository", "my-repo-1").build(); + IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), nodeSettings); + assertTrue("Index should be on remote node", settings.isAssignedOnRemoteNode()); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java new file mode 100644 index 0000000000000..65dd3b6447663 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexableField; +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexService; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +import static org.hamcrest.Matchers.containsString; + +public class ConstantKeywordFieldMapperTests extends OpenSearchSingleNodeTestCase { + + private IndexService indexService; + private DocumentMapperParser parser; + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + public void testDefaultDisabledIndexMapper() throws Exception { + + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "constant_keyword") + .field("value", "default_value") + .endObject() + .startObject("field2") + .field("type", "keyword") + .endObject(); + mapping = mapping.endObject().endObject().endObject(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.toString())); + + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> { + b.field("field", "sdf"); + b.field("field2", "szdfvsddf"); + }))); + assertThat( + e.getMessage(), + containsString( + "failed to parse field [field] of type [constant_keyword] in document with id '1'.
Preview of field's value: 'sdf'" + ) + ); + + final ParsedDocument doc = mapper.parse(source(b -> { + b.field("field", "default_value"); + b.field("field2", "field_2_value"); + })); + + final IndexableField field = doc.rootDoc().getField("field"); + + // constantKeywordField should not be stored + assertNull(field); + } + + public void testMissingDefaultIndexMapper() throws Exception { + + final XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "constant_keyword") + .endObject() + .startObject("field2") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject(); + + OpenSearchParseException e = expectThrows( + OpenSearchParseException.class, + () -> parser.parse("type", new CompressedXContent(mapping.toString())) + ); + assertThat(e.getMessage(), containsString("Field [field] is missing required parameter [value]")); + } + + private final SourceToParse source(CheckedConsumer build) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + build.accept(builder); + builder.endObject(); + return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java new file mode 100644 index 0000000000000..235811539a299 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.regex.Regex; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.query.QueryShardContext; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Predicate; + +public class ConstantKeywordFieldTypeTests extends FieldTypeTestCase { + + final ConstantKeywordFieldMapper.ConstantKeywordFieldType ft = new ConstantKeywordFieldMapper.ConstantKeywordFieldType( + "field", + "default" + ); + + public void testTermQuery() { + assertEquals(new MatchAllDocsQuery(), ft.termQuery("default", createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termQuery("not_default", createContext())); + } + + public void testTermsQuery() { + assertEquals(new MatchAllDocsQuery(), ft.termsQuery(Arrays.asList("default", "not_default"), createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Arrays.asList("no_default", "not_default"), createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(List.of(), createContext())); + } + + public void testInsensitiveTermQuery() { + assertEquals(new MatchAllDocsQuery(), ft.termQueryCaseInsensitive("defaUlt", createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termQueryCaseInsensitive("not_defaUlt", createContext())); + } + + public void testPrefixQuery() { + assertEquals(new MatchAllDocsQuery(), ft.prefixQuery("defau", null, createContext())); + assertEquals(new MatchNoDocsQuery(), ft.prefixQuery("not_default", null, createContext())); + } + + public void testWildcardQuery() { + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("defa*lt", null, createContext())); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("no_defa*lt", null, createContext())); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("defa*", null, createContext())); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("*ult", null, createContext())); + + } + + public void testExistsQuery() { + assertEquals(new MatchAllDocsQuery(), ft.existsQuery(createContext())); + } + + private QueryShardContext createContext() { + IndexMetadata indexMetadata = IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + + Predicate indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index"); + return new QueryShardContext( + 0, + indexSettings, + null, + null, + null, + null, + null, + null, + xContentRegistry(), + writableRegistry(), + null, + null, + System::currentTimeMillis, + null, + indexNameMatcher, + () -> true, + null + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java new file mode 100644 index 0000000000000..72fb7c88cc478 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.LongPoint; +import org.opensearch.script.Script; + +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldTypeTests extends FieldTypeTestCase { + + private DerivedFieldType createDerivedFieldType(String type) { + Mapper.BuilderContext context = mock(Mapper.BuilderContext.class); + when(context.path()).thenReturn(new ContentPath()); + return new DerivedFieldType( + type + " _derived_field", + type, + new Script(""), + DerivedFieldSupportedTypes.getFieldMapperFromType(type, type + "_derived_field", context), + DerivedFieldSupportedTypes.getIndexableFieldGeneratorType(type, type + "_derived_field") + ); + } + + public void testBooleanType() { + DerivedFieldType dft = createDerivedFieldType("boolean"); + assertTrue(dft.typeFieldMapper instanceof BooleanFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(true) instanceof Field); + assertTrue(dft.indexableFieldGenerator.apply(false) instanceof Field); + } + + public void testDateType() { + DerivedFieldType dft = createDerivedFieldType("date"); + assertTrue(dft.typeFieldMapper instanceof DateFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(System.currentTimeMillis()) instanceof LongPoint); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah")); + } + + public void testGeoPointType() { + DerivedFieldType dft = createDerivedFieldType("geo_point"); + assertTrue(dft.typeFieldMapper instanceof GeoPointFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(List.of(10.0, 20.0)) instanceof LatLonPoint); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0))); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of())); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of("10"))); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0, 20.0, 30.0))); + } + + public void testIPType() { + DerivedFieldType dft = createDerivedFieldType("ip"); + assertTrue(dft.typeFieldMapper instanceof IpFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply("127.0.0.1") instanceof InetAddressPoint); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah")); + } + + public void testKeywordType() { + DerivedFieldType dft = createDerivedFieldType("keyword"); + assertTrue(dft.typeFieldMapper instanceof KeywordFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply("test_keyword") instanceof KeywordField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10)); + } + + public void testLongType() { + DerivedFieldType dft = createDerivedFieldType("long"); + assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(10) instanceof LongField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10.0)); + } + + public void testDoubleType() { + DerivedFieldType dft = createDerivedFieldType("double"); + assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(10.0) 
instanceof DoubleField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("")); + } + + public void testUnsupportedType() { + expectThrows(IllegalArgumentException.class, () -> createDerivedFieldType("match_only_text")); + } +} diff --git a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java new file mode 100644 index 0000000000000..18d117fa8c0f5 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldQueryTests extends OpenSearchTestCase { + + private static final String[][] raw_requests = new String[][] { + { "40.135.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "40.135.0.0" }, + { "232.0.0.0 GET /images/hm_bg.jpg HTTP/1.0", "400", "232.0.0.0" }, + { "26.1.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "26.1.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" } }; + + public void testDerivedField() throws IOException { + // Create lucene documents + List docs = new ArrayList<>(); + for (String[] request : raw_requests) { + Document document = new Document(); + document.add(new TextField("raw_request", request[0], Field.Store.YES)); + document.add(new KeywordField("status", request[1], Field.Store.YES)); + docs.add(document); + } + + // Mock SearchLookup + SearchLookup searchLookup = mock(SearchLookup.class); + SourceLookup sourceLookup = new SourceLookup(); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(leafLookup.source()).thenReturn(sourceLookup); + + // Mock DerivedFieldScript.Factory + DerivedFieldScript.Factory factory = (params, lookup) -> (DerivedFieldScript.LeafFactory) ctx -> { + when(searchLookup.getLeafSearchLookup(ctx)).thenReturn(leafLookup); + return new DerivedFieldScript(params, lookup, ctx) { + @Override + public Object execute() { + return raw_requests[sourceLookup.docId()][2]; + } + }; + }; + + // Create ValueFetcher from mocked DerivedFieldScript.Factory + 
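+ // Editor's note (illustrative aside, not part of this patch): the flow exercised here is, roughly: for each candidate document, DerivedFieldQuery runs the script through the value fetcher, materialises the result as an in-memory IndexableField, and only then evaluates the wrapped query against that single-document view. Sketched under those assumptions (hypothetical names, not the production API):
+ //
+ //     Object derived = valueFetcher.fetch(doc);            // script output, e.g. "247.37.0.0"
+ //     IndexableField f = fieldGenerator.apply(derived);    // e.g. a KeywordField
+ //     boolean matches = matchesInIsolation(f, innerQuery); // match against just this doc
+ //
+ // The two log lines whose derived ip is 247.37.0.0 are therefore the expected hits.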
DerivedFieldScript.LeafFactory leafFactory = factory.newFactory((new Script("")).getParams(), searchLookup); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory); + + // Create DerivedFieldQuery + DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( + new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), + valueFetcher, + searchLookup, + (o -> new KeywordField("ip_from_raw_request", (String) o, Field.Store.NO)), + Lucene.STANDARD_ANALYZER + ); + + // Index and Search + + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + for (Document d : docs) { + iw.addDocument(d); + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(derivedFieldQuery, 10); + assertEquals(2, topDocs.totalHits.value); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java new file mode 100644 index 0000000000000..33008bee1a392 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.parseString; + +public class RemoteStoreEnumsTests extends OpenSearchTestCase { + + private static final String SEPARATOR = "/"; + + public void testParseString() { + // Case 1 - Pass values from the enum. 
+ String typeString = FIXED.toString(); + PathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); + assertEquals(FIXED, type); + + typeString = PathType.HASHED_PREFIX.toString(); + type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); + assertEquals(PathType.HASHED_PREFIX, type); + + // Case 2 - Pass random string + String randomTypeString = randomAlphaOfLength(2); + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> parseString(randomTypeString)); + assertEquals("Could not parse PathType for [" + randomTypeString + "]", ex.getMessage()); + + // Case 3 - Null string + ex = assertThrows(IllegalArgumentException.class, () -> parseString(null)); + assertEquals("Could not parse PathType for [null]", ex.getMessage()); + } + + public void testGeneratePathForFixedType() { + BlobPath blobPath = new BlobPath(); + List<String> pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId + SEPARATOR; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = FIXED.path(pathInput, null); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Translog Lock files - This is a negative case where the assertion will trip.
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> FIXED.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Segment Metadata + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + } + + private List getPathList() { + List pathList = new ArrayList<>(); + int length = randomIntBetween(0, 5); + for (int i = 0; i < length; i++) { + pathList.add(randomAlphaOfLength(randomIntBetween(2, 5))); + } + return pathList; + } + + private String getPath(List pathList) { + String p = String.join(SEPARATOR, pathList); + if (p.isEmpty() || p.endsWith(SEPARATOR)) { + return p; + } + return p + SEPARATOR; + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java index e072d3037caad..043b4493e8989 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java @@ -10,6 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; @@ -37,4 +38,22 @@ static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) when(indexShard.store()).thenReturn(store); return indexShard; } + + public static IndexSettings createIndexSettings(boolean remote) { + return createIndexSettings(remote, Settings.EMPTY); + } + + public static IndexSettings createIndexSettings(boolean remote, Settings settings) { + IndexSettings indexSettings; + if (remote) { + Settings nodeSettings = Settings.builder() + .put("node.name", "xyz") + .put("node.attr.remote_store.translog.repository", "seg_repo") + .build(); + indexSettings = IndexSettingsModule.newIndexSettings(new Index("test_index", "_na_"), settings, nodeSettings); + } else { + indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + } + return indexSettings; + } } diff --git a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java index 33e08a482b9c3..ec1600094084a 
100644 --- a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java @@ -142,7 +142,7 @@ public void run() { IndexShard replica = shards.addReplica(); Future<Void> future = shards.asyncRecoverReplica( replica, - (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener) { + (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener, threadPool) { @Override public void cleanFiles( int totalTranslogOps, @@ -223,17 +223,20 @@ public IndexResult index(Index op) throws IOException { }); thread.start(); IndexShard replica = shards.addReplica(); - Future<Void> fut = shards.asyncRecoverReplica(replica, (shard, node) -> new RecoveryTarget(shard, node, recoveryListener) { - @Override - public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Void> listener) { - try { - indexedOnPrimary.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); + Future<Void> fut = shards.asyncRecoverReplica( + replica, + (shard, node) -> new RecoveryTarget(shard, node, recoveryListener, threadPool) { + @Override + public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Void> listener) { + try { + indexedOnPrimary.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + super.prepareForTranslogOperations(totalTranslogOps, listener); } - super.prepareForTranslogOperations(totalTranslogOps, listener); } - }); + ); fut.get(); recoveryDone.countDown(); thread.join(); diff --git a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java index 17b5440ab5424..b891ac63378ac 100644 --- a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java @@ -72,6 +72,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; @@ -118,7 +119,8 @@ public void testIndexingDuringFileRecovery() throws Exception { indexShard, node, recoveryListener, - logger + logger, + threadPool ) ); @@ -482,7 +484,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { AtomicBoolean recoveryDone = new AtomicBoolean(false); final Future<Void> recoveryFuture = shards.asyncRecoverReplica(newReplica, (indexShard, node) -> { recoveryStart.countDown(); - return new RecoveryTarget(indexShard, node, recoveryListener) { + return new RecoveryTarget(indexShard, node, recoveryListener, threadPool) { @Override public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener<Void> listener) { recoveryDone.set(true); @@ -536,7 +538,7 @@ protected EngineFactory getEngineFactory(final ShardRouting routing) { final IndexShard replica = shards.addReplica(); final Future<Void> recoveryFuture = shards.asyncRecoverReplica( replica, - (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener) { + (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener, threadPool) { @Override public void indexTranslogOperations( final List<Translog.Operation> operations, @@ -812,9 +814,10 @@ public BlockingTarget( IndexShard shard, DiscoveryNode sourceNode, ReplicationListener
listener, - Logger logger + Logger logger, + ThreadPool threadPool ) { - super(shard, sourceNode, listener); + super(shard, sourceNode, listener, threadPool); this.recoveryBlocked = recoveryBlocked; this.releaseRecovery = releaseRecovery; this.stageToBlock = stageToBlock; diff --git a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java index 8c59e92a3fe8a..904c9a70e61e0 100644 --- a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java @@ -45,6 +45,7 @@ import org.opensearch.index.seqno.RetentionLeaseUtils; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -182,7 +183,8 @@ public void testTurnOffTranslogRetentionAfterAllShardStarted() throws Exception null, 1L, group.getPrimary().getReplicationGroup().getInSyncAllocationIds(), - group.getPrimary().getReplicationGroup().getRoutingTable() + group.getPrimary().getReplicationGroup().getRoutingTable(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); } group.syncGlobalCheckpoint(); diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 5656b77445772..594700ea60b3e 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -57,9 +57,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map<String, Stats> groupStats1 = new HashMap<>(); Map<String, Stats> groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking group stats are correct searchStats1.add(searchStats2); @@ -128,6 +128,7 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); + assertEquals(equalTo, stats.getSearchIdleReactivateCount()); // avg_concurrency is not summed up across stats assertEquals(1, stats.getConcurrentAvgSliceCount(), 0); } diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java index 8363ea3757a2b..a27f3476888eb 100644 --- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java +++
b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -54,6 +54,7 @@ import java.util.Collections; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -110,6 +111,7 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { final ShardId shardId = new ShardId(index, id); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); final Translog.Durability durability = randomFrom(Translog.Durability.ASYNC, Translog.Durability.REQUEST); when(indexShard.getTranslogDurability()).thenReturn(durability); @@ -158,14 +160,14 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { final GlobalCheckpointSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final GlobalCheckpointSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } @@ -191,6 +193,7 @@ public void testMayBeSyncTranslogWithRemoteTranslog() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(globalCheckpoint - 1); when(indexShard.getTranslogDurability()).thenReturn(Translog.Durability.REQUEST); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {})); verify(indexShard, never()).sync(); @@ -205,6 +208,7 @@ public void testMayBeSyncTranslogWithLocalTranslog() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(globalCheckpoint - 1); when(indexShard.getTranslogDurability()).thenReturn(Translog.Durability.REQUEST); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {})); verify(indexShard).sync(); diff --git a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java index ca80c7b9c4884..7a9f1d7baa12e 100644 --- a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java @@ -93,7 +93,8 @@ public void setUpReplicationTracker() throws InterruptedException { value -> {}, currentTimeMillis::get, (leases, listener) -> {}, - () -> safeCommitInfo + () -> safeCommitInfo, + sId -> false ); replicationTracker.updateFromClusterManager( 1L, diff --git 
a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 3cd60ac973709..fdbe89422a2aa 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -84,7 +84,8 @@ public void testAddOrRenewRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -132,7 +133,8 @@ public void testAddDuplicateRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -164,7 +166,8 @@ public void testRenewNotFoundRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -202,7 +205,8 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { equalTo(retainingSequenceNumbers) ); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); reference.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -241,7 +245,8 @@ public void testRemoveRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -305,7 +310,8 @@ public void testCloneRetentionLease() { assertTrue(synced.compareAndSet(false, true)); listener.onResponse(new ReplicationResponse()); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTrackerRef.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -351,7 +357,8 @@ public void testCloneNonexistentRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -380,7 +387,8 @@ public void testCloneDuplicateRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -413,7 +421,8 @@ public void testRemoveNotFound() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -451,7 +460,8 @@ public void testRemoveRetentionLeaseCausesRetentionLeaseSync() { equalTo(retainingSequenceNumbers) ); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); reference.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -504,7 +514,8 @@ private void runExpirationTest(final boolean primaryMode) { value -> 
{}, currentTimeMillis::get, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -583,7 +594,8 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -636,7 +648,8 @@ public void testLoadAndPersistRetentionLeases() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -671,7 +684,8 @@ public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -724,7 +738,8 @@ public void testPersistRetentionLeasesUnderConcurrency() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -788,7 +803,8 @@ public void testRenewLeaseWithLowerRetainingSequenceNumber() throws Exception { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java index e61d27695a5e5..daeefeff59c94 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java @@ -40,11 +40,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.SafeCommitInfo; +import org.opensearch.index.remote.RemoteStoreTestsHelper; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; import java.util.Set; +import java.util.function.Function; import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -57,18 +59,20 @@ ReplicationTracker newTracker( final AllocationId allocationId, final LongConsumer updatedGlobalCheckpoint, final LongSupplier currentTimeMillisSupplier, - final Settings settings + final Settings settings, + final boolean remote ) { return new ReplicationTracker( new ShardId("test", "_na_", 0), allocationId.getId(), - IndexSettingsModule.newIndexSettings("test", settings), + remote ? RemoteStoreTestsHelper.createIndexSettings(true, settings) : IndexSettingsModule.newIndexSettings("test", settings), randomNonNegativeLong(), UNASSIGNED_SEQ_NO, updatedGlobalCheckpoint, currentTimeMillisSupplier, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + remote ? 
REMOTE_DISCOVERY_NODE : NON_REMOTE_DISCOVERY_NODE ); } @@ -80,8 +84,21 @@ ReplicationTracker newTracker( return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, Settings.EMPTY); } + ReplicationTracker newTracker( + final AllocationId allocationId, + final LongConsumer updatedGlobalCheckpoint, + final LongSupplier currentTimeMillisSupplier, + final Settings settings + ) { + return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, settings, false); + } + static final Supplier<SafeCommitInfo> OPS_BASED_RECOVERY_ALWAYS_REASONABLE = () -> SafeCommitInfo.EMPTY; + static final Function<String, Boolean> NON_REMOTE_DISCOVERY_NODE = shardId -> false; + + static final Function<String, Boolean> REMOTE_DISCOVERY_NODE = shardId -> true; + static String nodeIdFromAllocationId(final AllocationId allocationId) { return "n-" + allocationId.getId().substring(0, 8); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 7971591e82bab..233a99cbe4a73 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -446,6 +446,10 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { private AtomicLong updatedGlobalCheckpoint = new AtomicLong(UNASSIGNED_SEQ_NO); + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings, boolean remote) { + return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings, remote); + } + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings) { return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings); } @@ -759,7 +763,8 @@ public void testPrimaryContextHandoff() throws IOException { onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); ReplicationTracker newPrimary = new ReplicationTracker( shardId, @@ -770,7 +775,8 @@ public void testPrimaryContextHandoff() throws IOException { onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); Set<String> allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); @@ -1300,7 +1306,7 @@ public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); @@ -1378,7 +1384,7 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); @@ -1476,7 +1482,7 @@ public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() throws Excep .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); tracker.activatePrimaryMode(localCheckpoint); @@ -1504,7 +1510,7 @@ public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); List<AllocationId> initializingRandomSubset = randomSubsetOf(initializing.keySet()); @@ -1537,7 +1543,7 @@ public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach( @@ -1606,8 +1612,8 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTrans .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); - tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, active, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); @@ -1655,7 +1661,7 @@ public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); @@ -2080,7 +2086,8 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + REMOTE_DISCOVERY_NODE ); ReplicationTracker newPrimary = new ReplicationTracker( shardId, @@ -2091,7 +2098,8 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + REMOTE_DISCOVERY_NODE ); Set<String> allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index ed04d9a20f18e..d5d7163b66698 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -60,6 +60,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; @@ -215,14 +216,14 @@ public void testBlocks() { public void testGetReplicationModeWithRemoteTranslog() { final RetentionLeaseBackgroundSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final RetentionLeaseBackgroundSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 63a9ac2f2e8ec..7610b8bc39296 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -60,6 +60,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.emptyMap; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; @@ -215,15 +216,15 @@ public void testBlocks() { public void testGetReplicationModeWithRemoteTranslog() { final RetentionLeaseSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final RetentionLeaseSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); - assertEquals(ReplicationMode.NO_REPLICATION,
action.getReplicationMode(indexShard)); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } private RetentionLeaseSyncAction createAction() { diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 46be10ce62840..e5bfa8caee79a 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -697,7 +697,8 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build() + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(primaryRouting) ); /* @@ -764,7 +765,8 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), - new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build() + new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(primaryRouting) ); latch.await(); assertThat(indexShard.getActiveOperationsCount(), is(oneOf(0, IndexShard.OPERATIONS_BLOCKED))); @@ -1446,7 +1448,8 @@ public void onFailure(Exception e) { (s, r) -> resyncLatch.countDown(), 1L, Collections.singleton(newRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build() + new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(newRouting) ); resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); @@ -3208,7 +3211,7 @@ public void testTranslogRecoverySyncsTranslog() throws IOException { indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); IndexShard replica = newShard(primary.shardId(), false, "n2", metadata, null); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) { + recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, threadPool) { @Override public void indexTranslogOperations( final List<Translog.Operation> operations, @@ -3284,7 +3287,7 @@ public void testRecoverFromTranslog() throws IOException { Translog.Snapshot snapshot = TestTranslog.newSnapshotFromOperations(operations); primary.markAsRecovering( "store", - new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) + new RecoveryState(primary.routingEntry(), IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) ); recoverFromStore(primary); @@ -3340,7 +3343,7 @@ public void testShardActiveDuringPeerRecovery() throws IOException { replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); // Shard is still inactive since we haven't started recovering yet assertFalse(replica.isActive()); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) { + recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, threadPool) { @Override public void indexTranslogOperations( final List<Translog.Operation> operations, @@ -3397,7 +3400,7 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); assertListenerCalled.accept(replica); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) { + recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, threadPool) { // we're only checking that listeners are called when the engine is open, before there is no point @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Void> listener) { @@ -4029,15 +4032,19 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { if (isPrimary) { newShard.markAsRecovering( "store", - new RecoveryState(newShard.routingEntry(), getFakeDiscoNode(newShard.routingEntry().currentNodeId()), null) + new RecoveryState( + newShard.routingEntry(), + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()), + null + ) ); } else { newShard.markAsRecovering( "peer", new RecoveryState( newShard.routingEntry(), - getFakeDiscoNode(newShard.routingEntry().currentNodeId()), - getFakeDiscoNode(newShard.routingEntry().currentNodeId()) + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()), + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()) ) ); } diff --git a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java index b1bcaac2c1947..09903a8b44cb5 100644 --- a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java @@ -111,7 +111,8 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build() + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint()); @@ -190,7 +191,8 @@ public void testSyncerOnClosingShard() throws Exception { null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build() + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); CountDownLatch syncCalledLatch = new CountDownLatch(1); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 85878cc2e1c9d..33f6c67b94b3d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -34,7 +34,7 @@ import
org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -249,7 +249,7 @@ public void testAfterMultipleCommits() throws IOException { setup(true, 3); assertDocs(indexShard, "1", "2", "3"); - for (int i = 0; i < indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { + for (int i = 0; i < indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { indexDocs(4 * (i + 1), 4); flushShard(indexShard); } @@ -635,9 +635,9 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory(); when(shard.indexSettings()).thenReturn(indexShard.indexSettings()); when(shard.shardId()).thenReturn(indexShard.shardId()); - RecoverySettings recoverySettings = mock(RecoverySettings.class); - when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); - when(shard.getRecoverySettings()).thenReturn(recoverySettings); + RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); + when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); + when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 4f5cad70fd643..85864eebd6d0d 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -86,7 +86,7 @@ public void testStartSequenceForReplicaRecovery() throws Exception { ); shards.addReplica(newReplicaShard); AtomicBoolean assertDone = new AtomicBoolean(false); - shards.recoverReplica(newReplicaShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { + shards.recoverReplica(newReplicaShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool) { @Override public IndexShard indexShard() { IndexShard idxShard = super.indexShard(); diff --git a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java index 98f86758ea2ca..c61c13eecf2c3 100644 --- a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java @@ -64,6 +64,7 @@ public void testListenersAreExecuted() { AtomicInteger newScrollContext = new AtomicInteger(); AtomicInteger freeScrollContext = new AtomicInteger(); AtomicInteger validateSearchContext = new AtomicInteger(); + AtomicInteger searchIdleReactivateCount = new AtomicInteger(); AtomicInteger timeInNanos = new
AtomicInteger(randomIntBetween(0, 10)); SearchOperationListener listener = new SearchOperationListener() { @Override @@ -133,6 +134,11 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertNotNull(readerContext); validateSearchContext.incrementAndGet(); } + + @Override + public void onSearchIdleReactivation() { + searchIdleReactivateCount.incrementAndGet(); + } }; SearchOperationListener throwingListener = (SearchOperationListener) Proxy.newProxyInstance( @@ -169,6 +175,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFetchPhase(ctx, timeInNanos.get()); @@ -182,6 +189,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onPreQueryPhase(ctx); @@ -195,6 +203,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onPreFetchPhase(ctx); @@ -208,6 +217,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFailedFetchPhase(ctx); @@ -221,6 +231,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFailedQueryPhase(ctx); @@ -234,6 +245,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onNewReaderContext(mock(ReaderContext.class)); @@ -247,6 +259,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onNewScrollContext(mock(ReaderContext.class)); @@ -260,6 +273,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFreeReaderContext(mock(ReaderContext.class)); @@ -273,6 +287,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest 
assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFreeScrollContext(mock(ReaderContext.class)); @@ -286,6 +301,21 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(2, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); + assertEquals(0, validateSearchContext.get()); + + compositeListener.onSearchIdleReactivation(); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(2, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); if (throwingListeners == 0) { @@ -311,6 +341,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(2, freeScrollContext.get()); + assertEquals(2, searchIdleReactivateCount.get()); assertEquals(2, validateSearchContext.get()); } } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 7caff3e5f5479..e93d266dcab4c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -256,7 +256,7 @@ public void onDone(ReplicationState state) { public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertEquals(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), "Expected failure"); } - }), + }, threadPool), true, true, replicatePrimaryFunction diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 8b69c15dac9d3..11b4eb078226f 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -37,6 +37,9 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -697,13 +700,23 @@ public void testCleanupAsync() throws Exception { threadPool, indexShard.shardId() ); - when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any())).thenReturn(remoteSegmentDirectory); + when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any(), any())).thenReturn(remoteSegmentDirectory); String repositoryName = "test-repository"; String indexUUID 
= "test-idx-uuid"; ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); + RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy( + randomFrom(PathType.values()), + randomFrom(PathHashAlgorithm.values()) + ); - RemoteSegmentStoreDirectory.remoteDirectoryCleanup(remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId); - verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId); + RemoteSegmentStoreDirectory.remoteDirectoryCleanup( + remoteSegmentStoreDirectoryFactory, + repositoryName, + indexUUID, + shardId, + pathStrategy + ); + verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathStrategy); verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); verify(remoteMetadataDirectory).delete(); verify(remoteDataDirectory).delete(); diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java index 897785849cf7b..de3dfbdaa4778 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java @@ -11,6 +11,8 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; @@ -48,6 +50,7 @@ public void testNewLockManager() throws IOException { String testRepository = "testRepository"; String testIndexUUID = "testIndexUUID"; String testShardId = "testShardId"; + RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy(PathType.FIXED); BlobStoreRepository repository = mock(BlobStoreRepository.class); BlobStore blobStore = mock(BlobStore.class); @@ -59,7 +62,12 @@ public void testNewLockManager() throws IOException { when(repositoriesService.repository(testRepository)).thenReturn(repository); - RemoteStoreLockManager lockManager = remoteStoreLockManagerFactory.newLockManager(testRepository, testIndexUUID, testShardId); + RemoteStoreLockManager lockManager = remoteStoreLockManagerFactory.newLockManager( + testRepository, + testIndexUUID, + testShardId, + pathStrategy + ); assertTrue(lockManager != null); ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index a83e737dc25c1..70800d4fd423a 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -99,7 +99,7 @@ import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; -import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static 
org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.contains; @@ -219,8 +219,9 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting new ByteSizeValue(8, ByteSizeUnit.KB), new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) ); - - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); + // To simulate that the node is remote backed + Settings nodeSettings = Settings.builder().put("node.attr.remote_store.translog.repository", "my-repo-1").build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings, nodeSettings); return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } @@ -906,7 +907,7 @@ public void testDrainSync() throws Exception { } private BlobPath getTranslogDirectory() { - return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG); + return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG.getName()); } private Long populateTranslogOps(boolean withMissingOps) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index e34bc078896f9..49719017ce736 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -48,6 +48,8 @@ import org.mockito.Mockito; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; @@ -95,7 +97,8 @@ public void setUp() throws Exception { translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -159,7 +162,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -194,7 +198,8 @@ public void testTransferSnapshotOnUploadTimeout() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -235,7 +240,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + 
remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -333,7 +339,8 @@ public void testReadMetadataNoFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -354,7 +361,8 @@ public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -390,7 +398,8 @@ public void testReadMetadataReadException() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -426,7 +435,8 @@ public void testReadMetadataListException() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -499,7 +509,8 @@ public void testDeleteTranslogSuccess() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, blobStoreTransferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -518,7 +529,8 @@ public void testDeleteStaleTranslogMetadata() { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -569,7 +581,8 @@ public void testDeleteTranslogFailure() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, blobStoreTransferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -612,7 +625,8 @@ public void testMetadataConflict() throws InterruptedException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 5e6398da6fa1b..0428bdf0655b0 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; 
import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingHelper; @@ -149,22 +150,27 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting = newRouting.moveToUnassigned(unassignedInfo) .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); + final DiscoveryNode localNode = new DiscoveryNode( + "foo", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ); IndexShard shard = index.createShard( newRouting, s -> {}, RetentionLeaseSyncer.EMPTY, SegmentReplicationCheckpointPublisher.EMPTY, - null + null, + null, + localNode, + null, + DiscoveryNodes.builder().add(localNode).build() ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); - final DiscoveryNode localNode = new DiscoveryNode( - "foo", - buildNewFakeTransportAddress(), - emptyMap(), - emptySet(), - Version.CURRENT - ); + shard.markAsRecovering("store", new RecoveryState(newRouting, localNode, null)); IndexShardTestCase.recoverFromStore(shard); newRouting = ShardRoutingHelper.moveToStarted(newRouting); diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 742dbdeba8c5b..6757dbc184961 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -622,6 +622,9 @@ public void testConflictingEngineFactories() { public void testClusterRemoteTranslogBufferIntervalDefault() { IndicesService indicesService = getIndicesService(); - assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals( + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval() + ); } } diff --git a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java new file mode 100644 index 0000000000000..3809a44e55901 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +public class RemoteStoreSettingsDynamicUpdateTests extends OpenSearchTestCase { + private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(Settings.EMPTY, clusterSettings); + + public void testSegmentMetadataRetention() { + // Default value + assertEquals(10, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < default (10) + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5) + .build() + ); + assertEquals(5, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting min value + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1) + .build() + ); + assertEquals(-1, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value > default (10) + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15) + .build() + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value to 0 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) + .build() + ) + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < -1 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) + .build() + ) + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index c455101ff4549..0490228a5cc16 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; @@ -264,7 +265,8 @@ public MockIndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final DiscoveryNodes discoveryNodes ) throws IOException { failRandomly(); RecoveryState 
recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); @@ -387,7 +389,8 @@ public void updateShardState( BiConsumer<IndexShard, ActionListener<ResyncTask>> primaryReplicaSyncer, long applyingClusterStateVersion, Set<String> inSyncAllocationIds, - IndexShardRoutingTable routingTable + IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException { failRandomly(); assertThat(this.shardId(), equalTo(shardRouting.shardId())); diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java index 4fbae4b0d53ca..ded174fb98eef 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -38,6 +38,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.test.NodeRoles; @@ -65,8 +66,8 @@ public void testDuplicateRecoveries() throws IOException { StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest( primary.shardId(), randomAlphaOfLength(10), - getFakeDiscoNode("source"), - getFakeDiscoNode("target"), + IndexShardTestUtils.getFakeDiscoNode("source"), + IndexShardTestUtils.getFakeDiscoNode("target"), Store.MetadataSnapshot.EMPTY, randomBoolean(), randomLong(), diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 34f854cae56ba..a8e5a02011538 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -56,6 +56,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; @@ -92,10 +93,10 @@ public void testWriteFileChunksConcurrently() throws Exception { mdFiles.add(md); } final IndexShard targetShard = newShard(false); - final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); - final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); + final DiscoveryNode pNode = IndexShardTestUtils.getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); + final DiscoveryNode rNode = IndexShardTestUtils.getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode)); - final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null); + final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null, threadPool); final PlainActionFuture<Void> receiveFileInfoFuture = new PlainActionFuture<>(); recoveryTarget.receiveFileInfo( mdFiles.stream().map(StoreFileMetadata::name).collect(Collectors.toList()), @@ -355,7 +356,7 @@ public void testResetStartingSeqNoIfLastCommitCorrupted() throws Exception {
shard.prepareForIndexRecovery(); long startingSeqNo = shard.recoverLocallyAndFetchStartSeqNo(true); shard.store().markStoreCorrupted(new IOException("simulated")); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, threadPool); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(logger, rNode, recoveryTarget, startingSeqNo); assertThat(request.startingSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(request.metadataSnapshot().size(), equalTo(0)); @@ -396,7 +397,7 @@ public void testResetStartRequestIfTranslogIsCorrupted() throws Exception { shard = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE)); shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode)); shard.prepareForIndexRecovery(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, threadPool); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest( logger, rNode, diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java index 18e7dfb375132..75639661f539d 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java @@ -96,49 +96,4 @@ public void testInternalLongActionTimeout() { ); assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout()); } - - public void testSegmentMetadataRetention() { - // Default value - assertEquals(10, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value < default (10) - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5).build() - ); - assertEquals(5, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting min value - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1).build() - ); - assertEquals(-1, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value > default (10) - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15).build() - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value to 0 should fail and retain the existing value - assertThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings( - Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) - .build() - ) - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value < -1 should fail and retain the existing value - assertThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings( - Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) - .build() - ) - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - } } diff --git 
a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index ad90255a3cc3f..71d89e2856c6e 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -137,7 +137,8 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { indexShard, node, recoveryListener, - logger + logger, + threadPool ) ); recoveryBlocked.await(); @@ -348,7 +349,7 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { } IndexShard replicaShard = newShard(primaryShard.shardId(), false); updateMappings(replicaShard, primaryShard.indexSettings().getIndexMetadata()); - recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { + recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool) { @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { super.prepareForTranslogOperations(totalTranslogOps, listener); @@ -480,7 +481,7 @@ public void onDone(ReplicationState state) { public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); } - })) + }, threadPool)) ); expectThrows(AlreadyClosedException.class, () -> replica.refresh("test")); group.removeReplica(replica); diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 2cf006176022d..352f827c74cb2 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -15,6 +15,7 @@ import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; @@ -35,9 +36,12 @@ import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -128,7 +132,9 @@ public void testPublishCheckpointActionOnReplica() { final ShardId shardId = new ShardId(index, id); when(indexShard.shardId()).thenReturn(shardId); - + when(indexShard.indexSettings()).thenReturn( + createIndexSettings(false, Settings.builder().put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), "SEGMENT").build()) + ); final SegmentReplicationTargetService mockTargetService = mock(SegmentReplicationTargetService.class); final PublishCheckpointAction action = new 
PublishCheckpointAction( @@ -160,17 +166,46 @@ public void testPublishCheckpointActionOnReplica() { } + public void testPublishCheckpointActionOnDocrepReplicaDuringMigration() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + final SegmentReplicationTargetService mockTargetService = mock(SegmentReplicationTargetService.class); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + mockTargetService + ); + // no interaction with SegmentReplicationTargetService object + verify(mockTargetService, never()).onNewCheckpoint(any(), any()); + } + public void testGetReplicationModeWithRemoteTranslog() { final PublishCheckpointAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final PublishCheckpointAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java index 9ce28bc7fdb40..6836ecb3d615f 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java @@ -49,7 +49,8 @@ public void testSettingsExists() { Arrays.asList( CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, - CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT + CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT ) ) ); @@ -149,4 +150,33 @@ public void testUpdateAfterGetConfiguredSettings() { assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); } + + public void testConfiguredSettingsForAdmin() { + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + 
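+ // CLUSTER_ADMIN_CPU_USAGE_LIMIT is a dynamic cluster setting; the applySettings call below updates it from 50 to 90 at runtime.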
.put(CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 50) + .build(); + + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getClusterAdminCPULimit().longValue(), 50); + + Settings updatedSettings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 90) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(cpuBasedAdmissionControllerSettings.getClusterAdminCPULimit().longValue(), 90); + } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java index ff777c175ec0e..c462f9700264d 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java @@ -72,6 +72,10 @@ public void testDefaultSettings() { assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), percent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); } public void testGetConfiguredSettings() { @@ -134,6 +138,10 @@ public void testUpdateAfterGetConfiguredSettings() { assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); Settings updatedSettings = Settings.builder() .put( @@ -146,6 +154,10 @@ assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); searchPercent = 70; updatedSettings = Settings.builder() @@ -156,5 +168,9 @@ public void 
testUpdateAfterGetConfiguredSettings() { clusterService.getClusterSettings().applySettings(updatedSettings); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); } } diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index fb4dc97435512..4ce4e28690697 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -225,6 +225,6 @@ long startRecovery( final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId()); indexShard.markAsRecovering("remote", new RecoveryState(indexShard.routingEntry(), sourceNode, rNode)); indexShard.prepareForIndexRecovery(); - return collection.start(new RecoveryTarget(indexShard, sourceNode, listener), timeValue); + return collection.start(new RecoveryTarget(indexShard, sourceNode, listener, threadPool), timeValue); } } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index fa13ec2036797..883df7da5d717 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -125,7 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); - assertThat(headers.get(78).value, equalTo("docs.deleted")); + assertThat(headers.get(79).value, equalTo("docs.deleted")); final List> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -141,9 +141,9 @@ public void testBuildTable() { assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(76).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(77).value, equalTo(shardStats.getStatePath())); - assertThat(row.get(78).value, equalTo(shardStats.getStats().getDocs().getDeleted())); + assertThat(row.get(77).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(78).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(79).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 3793249d569f0..a1a808c9faa9b 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -93,6 +93,7 @@ import java.util.function.Function; import java.util.function.Supplier; +import static org.opensearch.index.IndexSettings.INDEX_SEARCH_THROTTLED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; @@ -168,6 +169,7 @@ public void testPreProcess() 
throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); when(indexService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -486,6 +488,14 @@ public void testClearQueryCancellationsOnClose() throws IOException { when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class), anyBoolean())).thenReturn( queryShardContext ); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -551,7 +561,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } } - public void testSearchPathEvaluationUsingSortField() throws Exception { + public void testSearchPathEvaluation() throws Exception { ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); @@ -578,9 +588,24 @@ public void testSearchPathEvaluationUsingSortField() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); when(indexService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + IndexShard systemIndexShard = mock(IndexShard.class); + when(systemIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(systemIndexShard.getThreadPool()).thenReturn(threadPool); + when(systemIndexShard.isSystem()).thenReturn(true); + + IndexShard throttledIndexShard = mock(IndexShard.class); + when(throttledIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(throttledIndexShard.getThreadPool()).thenReturn(threadPool); + IndexSettings throttledIndexSettings = new IndexSettings( + indexMetadata, + Settings.builder().put(INDEX_SEARCH_THROTTLED.getKey(), true).build() + ); + when(throttledIndexShard.indexSettings()).thenReturn(throttledIndexSettings); + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { final Supplier searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { @@ -697,6 +722,62 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // Case 4: With a system index concurrent segment search is not used + readerContext = new ReaderContext( + newContextId(), + indexService, + systemIndexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + 
target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case 5: When search is throttled concurrent segment search is not used + readerContext = new ReaderContext( + newContextId(), + indexService, + throttledIndexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // shutdown the threadpool threadPool.shutdown(); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java index d68b0911d3d01..c10233a72921f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java @@ -187,10 +187,16 @@ protected LeafSlice[] slices(List leaves) { AggregationCollectorManager collectorManager; if (expectedNonGlobalAggsPerSlice > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(NonGlobalAggCollectorManager.class); + for (Collector c : nonGlobalCollectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } collectorManager.reduce(nonGlobalCollectors).reduce(context.queryResult()); } if (expectedGlobalAggs > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(GlobalAggCollectorManager.class); + for (Collector c : globalCollectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } ReduceableSearchResult result = collectorManager.reduce(globalCollectors); doReturn(result).when(testSearcher) .search(nullable(Query.class), ArgumentMatchers.>any()); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java index e9b2d40fd4ede..644cee57bd5a4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java @@ -50,6 +50,7 @@ import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.mapper.BinaryFieldMapper; +import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.FlatObjectFieldMapper; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -104,7 +105,8 @@ protected List unsupportedMappedFieldTypes() { return Arrays.asList( BinaryFieldMapper.CONTENT_TYPE, // binary fields are not supported because they do not have analyzers GeoPointFieldMapper.CONTENT_TYPE, // geopoint fields cannot use term queries - FlatObjectFieldMapper.CONTENT_TYPE // flat_object fields are not 
supported aggregations + FlatObjectFieldMapper.CONTENT_TYPE, // flat_object fields do not support aggregations + ConstantKeywordFieldMapper.CONTENT_TYPE // constant_keyword fields are not supported by this aggregation ); } diff --git a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java index b5d34a78ab5a4..b00f36ef52d4a 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java @@ -100,6 +100,7 @@ public void testProcessRelationSupport() throws Exception { client().prepareSearch("test") .setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, rectangle).relation(shapeRelation)) .get(); + fail("Expected " + shapeRelation + " query relation not supported for Field [" + defaultGeoFieldName + "]"); } catch (SearchPhaseExecutionException e) { assertThat( e.getCause().getMessage(), @@ -119,6 +120,7 @@ public void testQueryLine() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, line)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.LINESTRING + " queries"); } catch (SearchPhaseExecutionException e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.LINESTRING + " queries")); } @@ -138,13 +140,12 @@ public void testQueryLinearRing() throws Exception { searchRequestBuilder.setQuery(queryBuilder); searchRequestBuilder.setIndices("test"); searchRequestBuilder.get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support LINEARRING queries"); } catch (SearchPhaseExecutionException e) { assertThat( e.getCause().getMessage(), containsString("Field [" + defaultGeoFieldName + "] does not support LINEARRING queries") ); - } catch (UnsupportedOperationException e) { - assertThat(e.getMessage(), containsString("line ring cannot be serialized using GeoJson")); } } @@ -162,6 +163,7 @@ public void testQueryMultiLine() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, multiline)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.MULTILINESTRING + " queries"); } catch (Exception e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.MULTILINESTRING + " queries")); } @@ -177,6 +179,7 @@ public void testQueryMultiPoint() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, multiPoint)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.MULTIPOINT + " queries"); } catch (Exception e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.MULTIPOINT + " queries")); } @@ -192,6 +195,7 @@ public void testQueryPoint() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, point)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.POINT + " queries"); } catch (Exception e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.POINT + " queries")); } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 
635939e68de71..4326e5fc63961 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -103,6 +103,8 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.TransportAction; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.TransportGetTermVersionAction; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.update.UpdateHelper; import org.opensearch.client.AdminClient; @@ -190,6 +192,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -2075,7 +2078,8 @@ public void onFailure(final Exception e) { null, new RemoteStoreStatsTrackerFactory(clusterService, settings), DefaultRecoverySettings.INSTANCE, - new CacheModule(new ArrayList<>(), settings).getCacheService() + new CacheModule(new ArrayList<>(), settings).getCacheService(), + DefaultRemoteStoreSettings.INSTANCE ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -2437,6 +2441,18 @@ public void onFailure(final Exception e) { indexNameExpressionResolver ) ); + + actions.put( + GetTermVersionAction.INSTANCE, + new TransportGetTermVersionAction( + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ) + ); + DynamicActionRegistry dynamicActionRegistry = new DynamicActionRegistry(); dynamicActionRegistry.registerUnmodifiableActionMap(actions); client.initialize( diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 0ca4797bfeff1..a6275f200217a 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -62,7 +62,7 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.5.0' + api 'net.minidev:json-smart:2.5.1' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 43289a7c89524..1cb5501810c5d 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -898,7 +898,8 @@ public EngineConfig config( update -> {}, () -> 0L, (leases, listener) -> listener.onResponse(new ReplicationResponse()), - () -> SafeCommitInfo.EMPTY + () -> SafeCommitInfo.EMPTY, + sId -> false ); globalCheckpointSupplier = replicationTracker; retentionLeasesSupplier = replicationTracker::getRetentionLeases; diff --git 
a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index a65ce3cbdd380..b3d163db8c222 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -247,6 +247,9 @@ protected QueryShardContext createQueryShardContext(MapperService mapperService) when(queryShardContext.getSearchQuoteAnalyzer(any())).thenCallRealMethod(); when(queryShardContext.getSearchAnalyzer(any())).thenCallRealMethod(); when(queryShardContext.getIndexSettings()).thenReturn(mapperService.getIndexSettings()); + when(queryShardContext.getObjectMapper(anyString())).thenAnswer( + inv -> mapperService.getObjectMapper(inv.getArguments()[0].toString()) + ); when(queryShardContext.simpleMatchToIndexNames(any())).thenAnswer( inv -> mapperService.simpleMatchToFullName(inv.getArguments()[0].toString()) ); diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 9800782272ede..a5dc13c334513 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -64,6 +64,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; @@ -96,6 +97,7 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; @@ -340,6 +342,23 @@ public synchronized void startAll() throws IOException { startReplicas(replicas.size()); } + public synchronized DiscoveryNodes generateFakeDiscoveryNodes() { + DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(); + if (primary.indexSettings() != null && primary.indexSettings().isAssignedOnRemoteNode()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId())); + } + for (IndexShard replica : replicas) { + if (replica.indexSettings() != null && replica.indexSettings().isAssignedOnRemoteNode()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId())); + } + } + return builder.build(); + } + public synchronized int startReplicas(int numOfReplicasToStart) throws IOException { if (primary.routingEntry().initializing()) { startPrimary(); @@ -371,7 +390,8 @@ public void startPrimary() throws IOException { null, currentClusterStateVersion.incrementAndGet(), activeIds, - routingTable + routingTable, + generateFakeDiscoveryNodes() 
); for (final IndexShard replica : replicas) { recoverReplica(replica); @@ -492,7 +512,8 @@ public synchronized void promoteReplicaToPrimary( primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(), activeIds(), - routingTable + routingTable, + generateFakeDiscoveryNodes() ); } @@ -520,7 +541,7 @@ public synchronized boolean removeReplica(IndexShard replica) throws IOException } public void recoverReplica(IndexShard replica) throws IOException { - recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener)); + recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool)); } public void recoverReplica(IndexShard replica, BiFunction targetSupplier) @@ -638,14 +659,16 @@ public void syncGlobalCheckpoint() { } private void updateAllocationIDsOnPrimary() throws IOException { - primary.updateShardState( primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), activeIds(), - routingTable(Function.identity()) + routingTable(Function.identity()), + primary.indexSettings().isRemoteTranslogStoreEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable(Function.identity()).getShards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable(Function.identity()).getShards()) ); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index bf1c4d4c94e04..b2ece9c813802 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -116,6 +117,7 @@ import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; @@ -617,7 +619,16 @@ protected IndexShard newShard( @Nullable Path remotePath, IndexingOperationListener... 
listeners ) throws IOException { - final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + DiscoveryNodes discoveryNodes = IndexShardTestUtils.getFakeDiscoveryNodes(routing); + // To simulate that the node is remote backed + if ("true".equals(indexMetadata.getSettings().get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED))) { + nodeSettings = Settings.builder() + .put("node.name", routing.currentNodeId()) + .put("node.attr.remote_store.translog.repository", "seg_repo") + .build(); + discoveryNodes = DiscoveryNodes.builder().add(IndexShardTestUtils.getFakeRemoteEnabledNode(routing.currentNodeId())).build(); + } final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings); final IndexShard indexShard; if (storeProvider == null) { @@ -646,7 +657,7 @@ protected IndexShard newShard( RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isRemoteStoreEnabled() || indexSettings.isAssignedOnRemoteNode()) { String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); // Remote path is set via a repository. This is a hack used when shards are created using reset, // since we can't get the remote path from IndexShard directly; we use the repository to store it. @@ -701,9 +712,11 @@ protected IndexShard newShard( checkpointPublisher, remoteStore, remoteStoreStatsTrackerFactory, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, "dummy-node", - DefaultRecoverySettings.INSTANCE + DefaultRecoverySettings.INSTANCE, + DefaultRemoteStoreSettings.INSTANCE, + false, + discoveryNodes ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -977,7 +990,7 @@ protected void closeShards(Iterable<IndexShard> shards) throws IOException { protected void recoverShardFromStore(IndexShard primary) throws IOException { primary.markAsRecovering( "store", - new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) + new RecoveryState(primary.routingEntry(), IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) ); recoverFromStore(primary); updateRoutingEntry(primary, ShardRoutingHelper.moveToStarted(primary.routingEntry())); @@ -994,31 +1007,32 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin null, currentClusterStateVersion.incrementAndGet(), inSyncIds, - newRoutingTable + newRoutingTable, + DiscoveryNodes.builder() + .add( + new DiscoveryNode( + shardRouting.currentNodeId(), + shardRouting.currentNodeId(), + buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ) + ) + .build() ); } protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException { IndexShard primary = null; try { - primary = newStartedShard(true); + primary = newStartedShard(true, replica.indexSettings.getSettings()); recoverReplica(replica, primary, startReplica); } finally { closeShards(primary); } } - protected DiscoveryNode getFakeDiscoNode(String id) { - return new DiscoveryNode( - id, - id, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - DiscoveryNodeRole.BUILT_IN_ROLES, - Version.CURRENT - ); - } - protected void recoverReplica(IndexShard replica, IndexShard 
primary, boolean startReplica) throws IOException { recoverReplica(replica, primary, startReplica, getReplicationFunc(replica)); } @@ -1033,7 +1047,7 @@ protected void recoverReplica( recoverReplica( replica, primary, - (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener), + (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool), true, startReplica, replicatePrimaryFunction @@ -1051,7 +1065,7 @@ protected void recoverReplica( } public Function, List> getReplicationFunc(final IndexShard target) { - return target.indexSettings().isSegRepEnabled() ? (shardList) -> { + return target.indexSettings().isSegRepEnabledOrRemoteNode() ? (shardList) -> { try { assert shardList.size() >= 2; final IndexShard primary = shardList.get(0); @@ -1095,7 +1109,7 @@ protected void recoverReplica( * @param targetSupplier supplies an instance of {@link RecoveryTarget} * @param markAsRecovering set to {@code false} if the replica is marked as recovering */ - protected final void recoverUnstartedReplica( + public final void recoverUnstartedReplica( final IndexShard replica, final IndexShard primary, final BiFunction targetSupplier, @@ -1104,8 +1118,18 @@ protected final void recoverUnstartedReplica( final IndexShardRoutingTable routingTable, final Function, List> replicatePrimaryFunction ) throws IOException { - final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId()); - final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId()); + final DiscoveryNode pNode; + final DiscoveryNode rNode; + if (primary.isRemoteTranslogEnabled()) { + pNode = IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId()); + } else { + pNode = IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()); + } + if (replica.isRemoteTranslogEnabled()) { + rNode = IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId()); + } else { + rNode = IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId()); + } if (markAsRecovering) { replica.markAsRecovering("remote", new RecoveryState(replica.routingEntry(), pNode, rNode)); } else { @@ -1146,7 +1170,10 @@ protected final void recoverUnstartedReplica( null, currentClusterStateVersion.incrementAndGet(), inSyncIds, - routingTable + routingTable, + primary.isRemoteTranslogEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.getShards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.getShards()) ); try { PlainActionFuture future = new PlainActionFuture<>(); @@ -1180,7 +1207,10 @@ protected void startReplicaAfterRecovery( null, currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, - newRoutingTable + newRoutingTable, + primary.indexSettings.isRemoteTranslogStoreEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.shards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.shards()) ); replica.updateShardState( replica.routingEntry().moveToStarted(), @@ -1188,7 +1218,10 @@ protected void startReplicaAfterRecovery( null, currentClusterStateVersion.get(), inSyncIdsWithReplica, - newRoutingTable + newRoutingTable, + replica.indexSettings.isRemoteTranslogStoreEnabled() + ? 
IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.shards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.shards()) ); } @@ -1217,7 +1250,8 @@ protected void promoteReplica(IndexShard replica, Set inSyncIds, IndexSh ), currentClusterStateVersion.incrementAndGet(), inSyncIds, - newRoutingTable + newRoutingTable, + IndexShardTestUtils.getFakeDiscoveryNodes(routingEntry) ); } @@ -1361,7 +1395,7 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot s final Version version = Version.CURRENT; final ShardId shardId = shard.shardId(); final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); - final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); + final DiscoveryNode node = IndexShardTestUtils.getFakeDiscoNode(shard.routingEntry().currentNodeId()); final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, @@ -1489,7 +1523,7 @@ private SegmentReplicationTargetService prepareForReplication( SegmentReplicationSourceFactory sourceFactory = null; SegmentReplicationTargetService targetService; - if (primaryShard.indexSettings.isRemoteStoreEnabled()) { + if (primaryShard.indexSettings.isRemoteStoreEnabled() || primaryShard.indexSettings.isAssignedOnRemoteNode()) { RecoverySettings recoverySettings = new RecoverySettings( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java new file mode 100644 index 0000000000000..d3a4a95c3bdef --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class IndexShardTestUtils { + public static DiscoveryNode getFakeDiscoNode(String id) { + return new DiscoveryNode( + id, + id, + IndexShardTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } + + public static DiscoveryNode getFakeRemoteEnabledNode(String id) { + Map<String, String> remoteNodeAttributes = new HashMap<>(); + remoteNodeAttributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + return new DiscoveryNode( + id, + id, + IndexShardTestCase.buildNewFakeTransportAddress(), + remoteNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } + + public static DiscoveryNodes getFakeDiscoveryNodes(List<ShardRouting> shardRoutings) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting routing : shardRoutings) { + builder.add(getFakeDiscoNode(routing.currentNodeId())); + } + return builder.build(); + } + + public static DiscoveryNodes getFakeRemoteEnabledDiscoveryNodes(List<ShardRouting> shardRoutings) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting routing : shardRoutings) { + builder.add(getFakeRemoteEnabledNode(routing.currentNodeId())); + } + return builder.build(); + } + + public static DiscoveryNodes getFakeDiscoveryNodes(ShardRouting shardRouting) { + return DiscoveryNodes.builder().add(getFakeDiscoNode(shardRouting.currentNodeId())).build(); + } +} diff --git a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java index c478bf9239f74..0e42787b16be8 100644 --- a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java @@ -71,6 +71,17 @@ public static String randomFieldName(Random random) { return fieldName.toString(); } + /** + * Returns a random field name that doesn't exist in the document. + */ + public static String randomNonExistingFieldName(Random random, IngestDocument ingestDocument) { + String fieldName; + do { + fieldName = randomFieldName(random); + } while (canAddField(fieldName, ingestDocument) == false); + return fieldName; + } + /** * Returns a random leaf field name. 
*/ diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index 83b245a1bcecb..8c7e9718eb0cd 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -281,7 +281,22 @@ public double execute(Map params1, double[] values) { } else if (context.instanceClazz.equals(IntervalFilterScript.class)) { IntervalFilterScript.Factory factory = mockCompiled::createIntervalFilterScript; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(DerivedFieldScript.class)) { + DerivedFieldScript.Factory factory = (derivedFieldsParams, lookup) -> ctx -> new DerivedFieldScript( + derivedFieldsParams, + lookup, + ctx + ) { + @Override + public Object execute() { + Map vars = new HashMap<>(derivedFieldsParams); + vars.put("params", derivedFieldsParams); + return script.apply(vars); + } + }; + return context.factoryClazz.cast(factory); } + ContextCompiler compiler = contexts.get(context); if (compiler != null) { return context.factoryClazz.cast(compiler.compile(script::apply, params)); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 4eb49ebb42241..f83163bd139cd 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -95,6 +95,7 @@ import org.opensearch.index.fielddata.IndexFieldDataService; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.FieldAliasMapper; @@ -778,6 +779,10 @@ public void testSupportedFieldTypes() throws IOException { source.put("doc_values", "true"); } + if (mappedType.getKey().equals(ConstantKeywordFieldMapper.CONTENT_TYPE) == true) { + source.put("value", "default_value"); + } + Mapper.Builder builder = mappedType.getValue().parse(fieldName, source, new MockParserContext()); FieldMapper mapper = (FieldMapper) builder.build(new BuilderContext(settings, new ContentPath())); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 7cb1b3f4fe0d8..664314245530e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2070,7 +2070,8 @@ protected boolean addMockGeoShapeFieldMapper() { * @return boolean. 
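+ * Currently returns {@code false}: the mock telemetry plugin stays disabled until the issue linked in the method body is resolved.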
*/ protected boolean addMockTelemetryPlugin() { - return true; + // setting to false until https://github.com/opensearch-project/OpenSearch/issues/12615 is resolved + return false; } /** diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index aac3fca9e1e16..f381ebdb64fc2 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -211,7 +211,11 @@ "LuceneFixedGap", "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", - "Lucene50" }) + "Lucene50", + "Lucene90", + "Lucene94", + "Lucene95", + "Lucene99" }) @LuceneTestCase.SuppressReproduceLine public abstract class OpenSearchTestCase extends LuceneTestCase {
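// Usage sketch for the IndexShardTestUtils helpers added earlier in this patch (identifiers illustrative):
//   DiscoveryNode dataNode = IndexShardTestUtils.getFakeDiscoNode("node-1");
//   DiscoveryNode remoteNode = IndexShardTestUtils.getFakeRemoteEnabledNode("node-2");
//   DiscoveryNodes nodes = IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.getShards());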