diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 9456fbf8b4ca0..04e2ed5006269 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -1,7 +1,7 @@ name: "Changelog Verifier" on: pull_request: - types: [opened, edited, review_requested, synchronize, reopened, ready_for_review, labeled, unlabeled] + types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled] jobs: # Enforces the update of a changelog file on every pull request @@ -13,7 +13,19 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog-3x + with: + skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG-3.0.md' + continue-on-error: true + - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog with: skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG.md' + continue-on-error: true + - run: | + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then + exit 1 + fi diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml index 7efcf529588ed..a62ea9cfa179b 100644 --- a/.github/workflows/pull-request-checks.yml +++ b/.github/workflows/pull-request-checks.yml @@ -18,6 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: peternied/check-pull-request-description-checklist@v1.1 + if: github.actor != 'dependabot[bot]' with: checklist-items: | New functionality includes testing. diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md new file mode 100644 index 0000000000000..0715c6de49ca4 --- /dev/null +++ b/CHANGELOG-3.0.md @@ -0,0 +1,104 @@ +# CHANGELOG +All notable changes to this project are documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. + +## [Unreleased 3.0] +### Added +- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) +- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) +- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) +- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625))
- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028))
- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957))
- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986))
- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561))
- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880))
- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103))
- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807))
- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876))

### Dependencies
- Bump `log4j-core` from 2.18.0 to 2.19.0
- Bump `forbiddenapis` from 3.3 to 3.4
- Bump `avro` from 1.11.1 to 1.11.2
- Bump `woodstox-core` from 6.3.0 to 6.3.1
- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354))
- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488))
- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550))
- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973))
- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570))
- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599))
- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601))
- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0
- Bump `gson` from 2.10 to 2.10.1
- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1
- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1
- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12
- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0
- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0
- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291))
- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344))
- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([#7928](https://github.com/opensearch-project/OpenSearch/pull/7928))
- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543))
- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963))
- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147))
- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617))
- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305))
- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317))
- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317))
- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317))
- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611))
- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251))

### Changed
- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948))
- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638))
- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459))
- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855))
- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028))
- Deprecate CamelCase `PathHierarchy` tokenizer name in favor of lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894))
- Switched to more reliable OpenSearch Lucene snapshot location ([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728))
- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497))

### Deprecated

### Removed
- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568))
- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639))
- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768))
- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702))
- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703))
- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704))
- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728))
- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847))
- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926))
- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855))
- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837))
- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018))
- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021))
- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871))

### Fixed
- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439))
- Fix typo in API annotation check message ([#11836](https://github.com/opensearch-project/OpenSearch/pull/11836))
- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872))
- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035))
- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005))
- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328))

### Security

[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 170153d9afe82..9b81d017f7810 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,104 +3,6 @@ All notable changes to this project are documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries.
-## [Unreleased 3.0] -### Added -- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) -- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) -- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) -- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) -- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) -- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) -- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) -- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) -- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) -- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) -- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) - -### Dependencies -- Bump `log4j-core` from 2.18.0 to 2.19.0 -- Bump `forbiddenapis` from 3.3 to 3.4 -- Bump `avro` from 1.11.1 to 1.11.2 -- Bump `woodstox-core` from 6.3.0 to 6.3.1 -- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) -- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) -- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) -- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) -- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) -- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) -- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 -- Bump `gson` from 2.10 to 2.10.1 -- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 -- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 
-- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 -- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 -- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) -- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) -- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) -- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) -- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) -- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) -- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) -- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) -- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) - -### Changed -- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) -- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) -- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) -- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) -- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) -- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) -- Add task completion count in search backpressure stats API 
([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) -- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) -- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) -- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) - -### Deprecated - -### Removed -- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) -- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) -- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) -- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) -- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) -- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) -- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) -- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) -- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) -- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) -- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) -- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) -- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) -- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) - -### Fixed -- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) -- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) -- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) -- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) -- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) -- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) -- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) -- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled 
([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328))
-
-### Security
-
## [Unreleased 2.x]
### Added
- Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285))
@@ -114,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add cluster primary balance constraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656))
- [Remote Store] Make translog transfer timeout configurable ([#12704](https://github.com/opensearch-project/OpenSearch/pull/12704))
- Reject Resize index requests (i.e., split, shrink, and clone) while DocRep to SegRep migration is in progress. ([#12686](https://github.com/opensearch-project/OpenSearch/pull/12686))
+- Add support for more than one protocol for transport ([#12967](https://github.com/opensearch-project/OpenSearch/pull/12967))

### Dependencies
- Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
@@ -125,6 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `org.apache.commons:commonscodec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
- Bump `org.apache.commons:commonslang` from 3.13.0 to 3.14.0 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
- Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
+- Bump `com.gradle.enterprise` from 3.16.2 to 3.17 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116))

### Changed
- [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872))
@@ -140,10 +44,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix bulk API ignores ingest pipeline for upsert ([#12883](https://github.com/opensearch-project/OpenSearch/pull/12883))
- Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849))
- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048))
-- Fix IndicesRequestCache Stale calculation to be accurate. ([#13070](https://github.com/opensearch-project/OpenSearch/pull/13070)]
- Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100))
+- Implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098))
+- Fix IndicesRequestCache Stale calculation to be accurate. ([#13070](https://github.com/opensearch-project/OpenSearch/pull/13070))

### Security

-[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f5494925dcf50..bce6ca0d49294 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -146,12 +146,12 @@ Adding in the change is two step process:
2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there.

### Where should I put my CHANGELOG entry?
-Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain sections for the _next major_ and _next minor_ releases. Your entry should go into the section it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will likely be in that section.
+Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain **two files**: `CHANGELOG.md`, which corresponds to unreleased changes intended for the _next minor_ release, and `CHANGELOG-3.0.md`, which corresponds to unreleased changes intended for the _next major_ release. Your entry should go into the file corresponding to the version it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release, so most entries will be in the `CHANGELOG.md` file.

The following examples assume the _next major_ release on main is 3.0, the _next minor_ release is 2.5, and the _current_ release is 2.4.

-- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry).
+- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` in CHANGELOG.md on main, then backport to 2.x (including the changelog entry).
-- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` on main, do not backport.
+- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` in CHANGELOG-3.0.md on main, do not backport.
- **Upgrade a dependency to fix a CVE:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry), then backport to 2.4 and ensure the changelog entry is added to `[Unreleased 2.4.1]`.
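(Editorial illustration, not part of the diff — a hypothetical entry with an invented PR number: a change targeting the next minor release would be added under `[Unreleased 2.x]` in CHANGELOG.md as `- Fix frobnicator timeout handling ([#99999](https://github.com/opensearch-project/OpenSearch/pull/99999))`, while the same change targeting only the next major release would instead go under `[Unreleased 3.0]` in CHANGELOG-3.0.md.)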
## Review Process diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java index a6e49567ac7d5..ee67fd4f271a2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java @@ -80,6 +80,16 @@ public void reset() throws IOException { delegate.reset(); } + @Override + public void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + @Override public int read() throws IOException { return delegate.read(); diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java index a044586e095e3..ab6dfbc2feb25 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java @@ -12,6 +12,9 @@ import org.opensearch.core.common.bytes.BytesReference; import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.hamcrest.Matchers.is; /** test the FilterStreamInput using the same BaseStreamTests */ public class FilterStreamInputTests extends BaseStreamTests { @@ -21,4 +24,24 @@ protected StreamInput getStreamInput(BytesReference bytesReference) throws IOExc return new FilterStreamInput(StreamInput.wrap(br.bytes, br.offset, br.length)) { }; } + + public void testMarkAndReset() throws IOException { + FilterStreamInputTests filterStreamInputTests = new FilterStreamInputTests(); + + ByteBuffer buffer = ByteBuffer.wrap(new byte[20]); + for (int i = 0; i < buffer.limit(); i++) { + buffer.put((byte) i); + } + buffer.rewind(); + BytesReference bytesReference = BytesReference.fromByteBuffer(buffer); + StreamInput streamInput = filterStreamInputTests.getStreamInput(bytesReference); + streamInput.read(); + assertThat(streamInput.markSupported(), is(true)); + streamInput.mark(-1); + int int1 = streamInput.read(); + int int2 = streamInput.read(); + streamInput.reset(); + assertEquals(int1, streamInput.read()); + assertEquals(int2, streamInput.read()); + } } diff --git a/server/build.gradle b/server/build.gradle index 7d52849844aaa..cb48142a61159 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -387,7 +387,7 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { onlyModified = true failOnModification = true ignoreMissingClasses = true - annotationIncludes = ['@org.opensearch.common.annotation.PublicApi'] + annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") dependsOn downloadSnapshot diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 52b4dad553180..ec5637cec6485 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -650,6 +650,7 @@ public void testCacheWithInvalidation() 
throws Exception { .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) ) .get() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index 3df4ecff5250c..a2543f0592145 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -56,6 +56,11 @@ protected int numberOfReplicas() { return 1; } + @Override + public boolean useRandomReplicationStrategy() { + return true; + } + public void testPromoteReplicaToPrimary() throws Exception { final String indexName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); createIndex(indexName); @@ -65,7 +70,7 @@ public void testPromoteReplicaToPrimary() throws Exception { try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { waitForDocs(numOfDocs, indexer); } - refresh(indexName); + refreshAndWaitForReplication(indexName); } assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java new file mode 100644 index 0000000000000..de425ffc63816 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteMigrationAllocationDeciderIT extends MigrationBaseTestCase { + + // When the primary is on doc rep node, existing replica copy can get allocated on excluded docrep node. 
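    // (Editorial gloss, not part of the change: the test below 1) starts three docrep nodes and
    //  creates an index with one primary and one replica; 2) enters mixed compatibility mode with
    //  migration direction "remote_store"; 3) excludes every docrep node via
    //  index.routing.allocation.exclude._name; 4) expects the exclude filter to be skipped for the
    //  replica because the primary is still on a docrep node, so the cluster returns to green
    //  after a data node stops.)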
+    public void testFilterAllocationSkipsReplica() throws IOException {
+        addRemote = false;
+        List<String> docRepNodes = internalCluster().startNodes(3);
+        createIndex(
+            "test",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0")
+                .build()
+        );
+        ensureGreen("test");
+
+        ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+        updateSettingsRequest.persistentSettings(
+            Settings.builder()
+                .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")
+                .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")
+        );
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+        assertTrue(
+            internalCluster().client()
+                .admin()
+                .indices()
+                .prepareUpdateSettings("test")
+                .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", docRepNodes)))
+                .execute()
+                .actionGet()
+                .isAcknowledged()
+        );
+        internalCluster().stopRandomDataNode();
+        ensureGreen("test");
+    }
+
+    // When the primary is on remote node, new replica copy shouldn't get allocated on an excluded docrep node.
+    public void testFilterAllocationSkipsReplicaOnExcludedNode() throws IOException {
+        addRemote = false;
+        List<String> nodes = internalCluster().startNodes(2);
+        createIndex(
+            "test",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0")
+                .build()
+        );
+        ensureGreen("test");
+        addRemote = true;
+
+        ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+        updateSettingsRequest.persistentSettings(
+            Settings.builder()
+                .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")
+                .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")
+        );
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+        String remoteNode = internalCluster().startNode();
+
+        client().admin()
+            .cluster()
+            .prepareReroute()
+            .add(new MoveAllocationCommand("test", 0, primaryNodeName("test"), remoteNode))
+            .execute()
+            .actionGet();
+        client().admin()
+            .cluster()
+            .prepareHealth()
+            .setTimeout(TimeValue.timeValueSeconds(60))
+            .setWaitForEvents(Priority.LANGUID)
+            .setWaitForNoRelocatingShards(true)
+            .execute()
+            .actionGet();
+        assertEquals(remoteNode, primaryNodeName("test"));
+
+        assertTrue(
+            internalCluster().client()
+                .admin()
+                .indices()
+                .prepareUpdateSettings("test")
+                .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", nodes)))
+                .execute()
+                .actionGet()
+                .isAcknowledged()
+        );
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName("test")));
+        ClusterHealthResponse clusterHealthResponse = client().admin()
+            .cluster()
+            .prepareHealth()
+            .setWaitForEvents(Priority.LANGUID)
+            .setWaitForGreenStatus()
+            .setTimeout(TimeValue.timeValueSeconds(2))
+            .execute()
+            .actionGet();
+        assertTrue(clusterHealthResponse.isTimedOut());
+        ensureYellow("test");
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
index a31d203058565..640b83f194c1c 100644
---
a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -19,7 +19,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index ec98d5ff531cb..f5f9d515f2712 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -21,6 +21,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; @@ -56,6 +57,10 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -279,6 +284,11 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { String restoredIndexName1version1 = indexName1 + "-restored-1"; String restoredIndexName1version2 = indexName1 + "-restored-2"; + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); Settings indexSettings = getIndexSettings(1, 0).build(); @@ -476,12 +486,15 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { } void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { - String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); - - Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); - Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); - Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); - Path segmentDataPath = 
Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data");
+        Client client = client();
+        String path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", TRANSLOG, METADATA).buildAsString();
+        Path remoteTranslogMetadataPath = Path.of(remoteRepoPath + "/" + path);
+        path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", TRANSLOG, DATA).buildAsString();
+        Path remoteTranslogDataPath = Path.of(remoteRepoPath + "/" + path);
+        path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", SEGMENTS, METADATA).buildAsString();
+        Path segmentMetadataPath = Path.of(remoteRepoPath + "/" + path);
+        path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", SEGMENTS, DATA).buildAsString();
+        Path segmentDataPath = Path.of(remoteRepoPath + "/" + path);

        try (
            Stream<Path> translogMetadata = Files.list(remoteTranslogMetadataPath);
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
index 46e5b7aa28318..b767ffff05e3a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -23,6 +23,7 @@
import org.opensearch.cluster.routing.RecoverySource;
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.opensearch.common.Priority;
+import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor;
@@ -57,7 +58,11 @@
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
+import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.comparesEqualTo;
@@ -182,13 +187,9 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
        int numberOfIterations = randomIntBetween(5, 15);
        indexData(numberOfIterations, true, INDEX_NAME);
-        String indexUUID = client().admin()
-            .indices()
-            .prepareGetSettings(INDEX_NAME)
-            .get()
-            .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
-        Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
-
+        String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString();
+        Path indexPath = Path.of(segmentRepoPath + "/" + shardPath);
+        ;
        IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
        int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles();
        // Delete is async.
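        // (Editorial note) The recurring refactor in this change set: rather than hand-building the
        // fixed layout "<repo>/<indexUUID>/0/segments/metadata" from the index UUID, tests resolve
        // the shard-level path through the helper, which presumably also holds once non-fixed
        // (hashed) remote-store path types are configured, e.g.:
        //   String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString();
        //   Path indexPath = Path.of(segmentRepoPath + "/" + shardPath);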
@@ -212,12 +213,8 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, false, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); @@ -231,12 +228,8 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); @@ -250,12 +243,9 @@ public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Excepti createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(12, 18); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); + ; int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations + 1))); @@ -589,12 +579,8 @@ public void testFallbackToNodeToNodeSegmentCopy() throws Exception { flushAndRefresh(INDEX_NAME); // 3. 
Delete data from remote segment store
-        String indexUUID = client().admin()
-            .indices()
-            .prepareGetSettings(INDEX_NAME)
-            .get()
-            .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
-        Path segmentDataPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/data");
+        String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, DATA).buildAsString();
+        Path segmentDataPath = Path.of(segmentRepoPath + "/" + shardPath);

        try (Stream<Path> files = Files.list(segmentDataPath)) {
            files.forEach(p -> {
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
index acdb21d072320..65016c4976157 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
@@ -11,6 +11,7 @@
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.common.settings.Settings;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -22,7 +23,10 @@
import java.util.Set;
import java.util.concurrent.TimeUnit;

+import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED;
+import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase {
@@ -45,8 +49,10 @@ public void testRemoteRefreshRetryOnFailure() throws Exception {
        IndicesStatsResponse response = client().admin().indices().stats(new IndicesStatsRequest()).get();
        assertEquals(1, response.getShards().length);

+        String indexName = response.getShards()[0].getShardRouting().index().getName();
        String indexUuid = response.getShards()[0].getShardRouting().index().getUUID();
-        Path segmentDataRepoPath = location.resolve(String.format(Locale.ROOT, "%s/0/segments/data", indexUuid));
+        String shardPath = getShardLevelBlobPath(client(), indexName, new BlobPath(), "0", SEGMENTS, DATA).buildAsString();
+        Path segmentDataRepoPath = location.resolve(shardPath);
        String segmentDataLocalPath = String.format(Locale.ROOT, "%s/indices/%s/0/index", response.getShards()[0].getDataPath(), indexUuid);

        logger.info("--> Verify that the segment files are same on local and repository eventually");
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java
index 78827849a8037..e688a4491b1a7 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java
@@ -14,9 +14,14 @@
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.UUIDs;
import org.opensearch.common.action.ActionFuture;
+import org.opensearch.common.blobstore.BlobContainer;
+import
org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -27,6 +32,8 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.comparesEqualTo; @@ -307,7 +314,21 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { SnapshotInfo snapshotInfo1 = createFullSnapshot(snapshotRepoName, "snap1"); SnapshotInfo snapshotInfo2 = createFullSnapshot(snapshotRepoName, "snap2"); - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); + final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(REMOTE_REPO_NAME); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreEnabledIndexName, + remoteStoreRepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); + BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath); + String[] lockFiles; + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + lockFiles = lockDirectory.listAll(); + } assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); // delete remote store index @@ -320,7 +341,9 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { .get(); assertAcked(deleteSnapshotResponse); - lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + lockFiles = lockDirectory.listAll(); + } assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); assertTrue(lockFiles[0].contains(snapshotInfo2.snapshotId().getUUID())); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index af4b2c61a95b1..d3200c1bc9d75 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -38,11 +38,13 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import 
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.node.remotestore.RemoteStoreNodeService;

 import java.util.Map;
@@ -102,14 +104,32 @@ public class FilterAllocationDecider extends AllocationDecider {
     private volatile DiscoveryNodeFilters clusterRequireFilters;
     private volatile DiscoveryNodeFilters clusterIncludeFilters;
     private volatile DiscoveryNodeFilters clusterExcludeFilters;
+    private volatile RemoteStoreNodeService.Direction migrationDirection;
+    private volatile RemoteStoreNodeService.CompatibilityMode compatibilityMode;

     public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings));
         setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings));
         setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings));
+        this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings);
+        this.compatibilityMode = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings);
+
         clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters, (a, b) -> {});
         clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters, (a, b) -> {});
         clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters, (a, b) -> {});
+        clusterSettings.addSettingsUpdateConsumer(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, this::setMigrationDirection);
+        clusterSettings.addSettingsUpdateConsumer(
+            RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING,
+            this::setCompatibilityMode
+        );
+    }
+
+    private void setMigrationDirection(RemoteStoreNodeService.Direction migrationDirection) {
+        this.migrationDirection = migrationDirection;
+    }
+
+    private void setCompatibilityMode(RemoteStoreNodeService.CompatibilityMode compatibilityMode) {
+        this.compatibilityMode = compatibilityMode;
     }

     @Override
@@ -127,10 +147,28 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
                 "initial allocation of the shrunken index is only allowed on nodes [%s] that hold a copy of every shard in the index";
                 return allocation.decision(Decision.NO, NAME, explanation, initialRecoveryFilters);
             }
+
+            Decision decision = isRemoteStoreMigrationReplicaDecision(shardRouting, allocation);
+            if (decision != null) return decision;
         }
         return shouldFilter(shardRouting, node.node(), allocation);
     }

+    public Decision isRemoteStoreMigrationReplicaDecision(ShardRouting shardRouting, RoutingAllocation allocation) {
+        assert shardRouting.unassigned();
+        boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(shardRouting.shardId(), allocation);
+        if (shardRouting.primary() == false
+            && shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED
+            && (compatibilityMode.equals(RemoteStoreNodeService.CompatibilityMode.MIXED))
+            && (migrationDirection.equals(RemoteStoreNodeService.Direction.REMOTE_STORE))
+            && primaryOnRemote == false) {
+            String explanation =
+                "in remote store migration, allocation filters are not applicable for replica copies whose primary is on doc rep node";
+            return allocation.decision(Decision.YES, NAME, explanation);
+        }
+        return null;
+    }
+
     @Override
     public Decision canAllocate(IndexMetadata indexMetadata, RoutingNode node, RoutingAllocation allocation) {
         return shouldFilter(indexMetadata, node.node(), allocation);
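// Illustrative sketch, not part of this patch: the bypass added to FilterAllocationDecider above,
// reduced to a stand-alone predicate. During a docrep-to-remote migration a replica may not move
// to a remote node before its primary has (see RemoteStoreMigrationAllocationDecider below), so
// cluster-level filters used to drain docrep nodes must not leave such replicas unassignable.
// All names below are hypothetical:
class MigrationReplicaFilterSketch {
    static boolean replicaSkipsAllocationFilters(
        boolean primary,          // is this the primary copy?
        boolean indexJustCreated, // UnassignedInfo.Reason.INDEX_CREATED
        boolean mixedMode,        // CompatibilityMode.MIXED
        boolean towardRemote,     // Direction.REMOTE_STORE
        boolean primaryOnRemote   // result of isPrimaryOnRemote(...)
    ) {
        // Filters are skipped only for replicas of pre-existing indices while the cluster is in
        // mixed mode, migrating toward the remote store, with the primary still on a docrep node.
        return primary == false && indexJustCreated == false && mixedMode && towardRemote && primaryOnRemote == false;
    }
}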
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
index 27ebe5390ea6d..7d40aacb71e25 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
@@ -39,6 +39,7 @@
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.node.remotestore.RemoteStoreNodeService;
 import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode;
 import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction;
@@ -60,9 +61,8 @@ public class RemoteStoreMigrationAllocationDecider extends AllocationDecider {

     public static final String NAME = "remote_store_migration";

-    private Direction migrationDirection;
-    private CompatibilityMode compatibilityMode;
-    private boolean remoteStoreBackedIndex;
+    volatile private Direction migrationDirection;
+    volatile private CompatibilityMode compatibilityMode;

     public RemoteStoreMigrationAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings);
@@ -106,9 +106,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
         // check for remote store backed indices
         IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index());
-        if (IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.exists(indexMetadata.getSettings())) {
-            remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings());
-        }
+        boolean remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings());
         if (remoteStoreBackedIndex && targetNode.isRemoteStoreNode() == false) {
             // allocations and relocations must be to a remote node
             String reason = String.format(
@@ -133,15 +131,20 @@ private Decision primaryShardDecision(ShardRouting primaryShardRouting, Discover
         return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, ""));
     }

+    // Checks if primary shard is on a remote node.
+    static boolean isPrimaryOnRemote(ShardId shardId, RoutingAllocation allocation) {
+        ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(shardId);
+        if (primaryShardRouting != null) {
+            DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId());
+            return primaryShardNode.isRemoteStoreNode();
+        }
+        return false;
+    }
+
     private Decision replicaShardDecision(ShardRouting replicaShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) {
         if (targetNode.isRemoteStoreNode()) {
-            ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(replicaShardRouting.shardId());
-            boolean primaryHasMigratedToRemote = false;
-            if (primaryShardRouting != null) {
-                DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId());
-                primaryHasMigratedToRemote = primaryShardNode.isRemoteStoreNode();
-            }
-            if (primaryHasMigratedToRemote == false) {
+            boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(replicaShardRouting.shardId(), allocation);
+            if (primaryOnRemote == false) {
                 return allocation.decision(
                     Decision.NO,
                     NAME,
diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
index 763594ed52977..6f3e8be7c28b8 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
@@ -79,6 +79,15 @@ public BlobPath add(String path) {
         return new BlobPath(Collections.unmodifiableList(paths));
     }

+    /**
+     * Add additional level of paths to the existing path and returns new {@link BlobPath} with the updated paths.
+     */
+    public BlobPath add(Iterable<String> paths) {
+        List<String> updatedPaths = new ArrayList<>(this.paths);
+        paths.iterator().forEachRemaining(updatedPaths::add);
+        return new BlobPath(Collections.unmodifiableList(updatedPaths));
+    }
+
     public String buildAsString() {
         String p = String.join(SEPARATOR, paths);
         if (p.isEmpty() || p.endsWith(SEPARATOR)) {
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
index 30cfc054e3d0a..b51abf19fc000 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
@@ -103,9 +103,27 @@ boolean requiresHashAlgorithm() {
     HASHED_PREFIX(1) {
         @Override
         public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
-            // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise.
-            // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests.
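// Illustrative note, not part of this patch: with the implementations added below, the same
// shard-level inputs yield differently shaped blob paths per PathType. Hypothetical example,
// with "A1b2C3x" standing in for the url-safe base64 FNV-1a hash of indexUUID + shardId +
// dataCategory + dataType:
//   FIXED         : <base-path>/<index-uuid>/<shard-id>/segments/data/
//   HASHED_PREFIX : A1b2C3x/<base-path>/<index-uuid>/<shard-id>/segments/data/
//   HASHED_INFIX  : <base-path>/A1b2C3x/<index-uuid>/<shard-id>/segments/data/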
+ assert Objects.nonNull(hashAlgorithm) : "hashAlgorithm is expected to be non-null"; + return BlobPath.cleanPath() + .add(hashAlgorithm.hash(pathInput)) + .add(pathInput.basePath()) + .add(pathInput.indexUUID()) + .add(pathInput.shardId()) + .add(pathInput.dataCategory().getName()) + .add(pathInput.dataType().getName()); + } + + @Override + boolean requiresHashAlgorithm() { + return true; + } + }, + HASHED_INFIX(2) { + @Override + public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + assert Objects.nonNull(hashAlgorithm) : "hashAlgorithm is expected to be non-null"; return pathInput.basePath() + .add(hashAlgorithm.hash(pathInput)) .add(pathInput.indexUUID()) .add(pathInput.shardId()) .add(pathInput.dataCategory().getName()) @@ -200,10 +218,11 @@ public enum PathHashAlgorithm { FNV_1A(0) { @Override - long hash(PathInput pathInput) { + String hash(PathInput pathInput) { String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); - return FNV1a.hash32(input); + long hash = FNV1a.hash64(input); + return RemoteStoreUtils.longToUrlBase64(hash); } }; @@ -218,6 +237,7 @@ public int getCode() { } private static final Map CODE_TO_ENUM; + static { PathHashAlgorithm[] values = values(); Map codeToStatus = new HashMap<>(values.length); @@ -240,7 +260,7 @@ public static PathHashAlgorithm fromCode(int code) { return CODE_TO_ENUM.get(code); } - abstract long hash(PathInput pathInput); + abstract String hash(PathInput pathInput); public static PathHashAlgorithm parseString(String pathHashAlgorithm) { try { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index b4c33d781af86..7d0743e70b6cb 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -10,7 +10,9 @@ import org.opensearch.common.collect.Tuple; +import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -101,4 +103,17 @@ public static void verifyNoMultipleWriters(List mdFiles, Function messageHandler + ) throws IOException; + + public boolean canHandleBytes(ReleasableBytesReference reference); + + @Override + void close(); +} diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java index 82fc09a985446..d6b7a98e876b3 100644 --- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java @@ -50,8 +50,8 @@ */ public class InboundDecoder implements Releasable { - static final Object PING = new Object(); - static final Object END_CONTENT = new Object(); + public static final Object PING = new Object(); + public static final Object END_CONTENT = new Object(); private final Version version; private final PageCacheRecycler recycler; diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index a8315c3cae4e0..6492900c49a0e 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -32,35 +32,14 @@ package org.opensearch.transport; -import org.apache.logging.log4j.LogManager; -import 
org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.BytesRef; -import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.common.io.stream.ByteBufferStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.transport.TransportAddress; -import org.opensearch.core.transport.TransportResponse; -import org.opensearch.telemetry.tracing.Span; -import org.opensearch.telemetry.tracing.SpanBuilder; -import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; -import java.io.EOFException; import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Collections; import java.util.Map; -import java.util.stream.Collectors; /** * Handler for inbound data @@ -69,21 +48,13 @@ */ public class InboundHandler { - private static final Logger logger = LogManager.getLogger(InboundHandler.class); - private final ThreadPool threadPool; - private final OutboundHandler outboundHandler; - private final NamedWriteableRegistry namedWriteableRegistry; - private final TransportHandshaker handshaker; - private final TransportKeepAlive keepAlive; - private final Transport.ResponseHandlers responseHandlers; - private final Transport.RequestHandlers requestHandlers; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; private volatile long slowLogThresholdMs = Long.MAX_VALUE; - private final Tracer tracer; + private final Map protocolMessageHandlers; InboundHandler( ThreadPool threadPool, @@ -96,13 +67,19 @@ public class InboundHandler { Tracer tracer ) { this.threadPool = threadPool; - this.outboundHandler = outboundHandler; - this.namedWriteableRegistry = namedWriteableRegistry; - this.handshaker = handshaker; - this.keepAlive = keepAlive; - this.requestHandlers = requestHandlers; - this.responseHandlers = responseHandlers; - this.tracer = tracer; + this.protocolMessageHandlers = Map.of( + NativeInboundMessage.NATIVE_PROTOCOL, + new NativeMessageHandler( + threadPool, + outboundHandler, + namedWriteableRegistry, + handshaker, + requestHandlers, + responseHandlers, + tracer, + keepAlive + ) + ); } void setMessageListener(TransportMessageListener listener) { @@ -117,377 +94,17 @@ void setSlowLogThreshold(TimeValue slowLogThreshold) { this.slowLogThresholdMs = slowLogThreshold.getMillis(); } - void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception { + void inboundMessage(TcpChannel channel, ProtocolInboundMessage message) throws Exception { final long startTime = threadPool.relativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); - TransportLogger.logInboundMessage(channel, message); - if (message.isPing()) { - keepAlive.receiveKeepAlive(channel); - } else { - messageReceived(channel, message, startTime); - } - } - - // Empty stream constant to avoid instantiating a new stream for empty messages. 
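// Illustrative sketch, not part of this patch: the logic removed below is not lost -- it moves
// into the new NativeMessageHandler, and InboundHandler shrinks to the per-protocol lookup added
// above. The shape of that dispatch, as a stand-alone model (all names hypothetical):
class ProtocolDispatchSketch {
    interface Message { String protocol(); }
    interface Handler { void onMessage(Message message); }

    private final java.util.Map<String, Handler> handlers;

    ProtocolDispatchSketch(java.util.Map<String, Handler> handlers) {
        this.handlers = handlers;
    }

    void dispatch(Message message) {
        // Route each inbound message to the handler registered for its wire protocol.
        Handler handler = handlers.get(message.protocol());
        if (handler == null) {
            throw new IllegalStateException("No protocol message handler found for protocol: " + message.protocol());
        }
        handler.onMessage(message);
    }
}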
- private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); - - private void messageReceived(TcpChannel channel, InboundMessage message, long startTime) throws IOException { - final InetSocketAddress remoteAddress = channel.getRemoteAddress(); - final Header header = message.getHeader(); - assert header.needsToReadVariableHeader() == false; - ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext existing = threadContext.stashContext()) { - // Place the context with the headers from the message - threadContext.setHeaders(header.getHeaders()); - threadContext.putTransient("_remote_address", remoteAddress); - if (header.isRequest()) { - handleRequest(channel, header, message); - } else { - // Responses do not support short circuiting currently - assert message.isShortCircuit() == false; - final TransportResponseHandler handler; - long requestId = header.getRequestId(); - if (header.isHandshake()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - TransportResponseHandler theHandler = responseHandlers.onResponseReceived( - requestId, - messageListener - ); - if (theHandler == null && header.isError()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - handler = theHandler; - } - } - // ignore if its null, the service logs it - if (handler != null) { - final StreamInput streamInput; - if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { - streamInput = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(streamInput, header.getVersion()); - if (header.isError()) { - handlerResponseError(requestId, streamInput, handler); - } else { - handleResponse(requestId, remoteAddress, streamInput, handler); - } - } else { - assert header.isError() == false; - handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); - } - } - - } - } finally { - final long took = threadPool.relativeTimeInMillis() - startTime; - final long logThreshold = slowLogThresholdMs; - if (logThreshold > 0 && took > logThreshold) { - logger.warn( - "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", - message, - took, - logThreshold - ); - } - } - } - - private Map> extractHeaders(Map headers) { - return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); - } - - private void handleRequest(TcpChannel channel, Header header, InboundMessage message) throws IOException { - final String action = header.getActionName(); - final long requestId = header.getRequestId(); - final Version version = header.getVersion(); - final Map> headers = extractHeaders(header.getHeaders().v1()); - Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers); - try (SpanScope spanScope = tracer.withSpanInScope(span)) { - if (header.isHandshake()) { - messageListener.onRequestReceived(requestId, action); - // Cannot short circuit handshakes - assert message.isShortCircuit() == false; - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final TcpTransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - TransportChannel traceableTransportChannel = 
TraceableTcpTransportChannel.create(transportChannel, span, tracer); - try { - handshaker.handleHandshake(traceableTransportChannel, requestId, stream); - } catch (Exception e) { - if (Version.CURRENT.isCompatible(header.getVersion())) { - sendErrorResponse(action, traceableTransportChannel, e); - } else { - logger.warn( - new ParameterizedMessage( - "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", - channel, - header.getVersion() - ), - e - ); - channel.close(); - } - } - } else { - final TcpTransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); - try { - messageListener.onRequestReceived(requestId, action); - if (message.isShortCircuit()) { - sendErrorResponse(action, traceableTransportChannel, message.getException()); - } else { - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final RequestHandlerRegistry reg = requestHandlers.getHandler(action); - assert reg != null; - - final T request = newRequest(requestId, action, stream, reg); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - checkStreamIsFullyConsumed(requestId, action, stream); - - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, traceableTransportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), traceableTransportChannel, e); - } - } else { - threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); - } - } - } catch (Exception e) { - sendErrorResponse(action, traceableTransportChannel, e); - } - } - } catch (Exception e) { - span.setError(e); - span.endSpan(); - throw e; - } - } - - /** - * Creates new request instance out of input stream. Throws IllegalStateException if the end of - * the stream was reached before the request is fully deserialized from the stream. - * @param transport request type - * @param requestId request identifier - * @param action action name - * @param stream stream - * @param reg request handler registry - * @return new request instance - * @throws IOException IOException - * @throws IllegalStateException IllegalStateException - */ - private T newRequest( - final long requestId, - final String action, - final StreamInput stream, - final RequestHandlerRegistry reg - ) throws IOException { - try { - return reg.newRequest(stream); - } catch (final EOFException e) { - // Another favor of (de)serialization issues is when stream contains less bytes than - // the request handler needs to deserialize the payload. - throw new IllegalStateException( - "Message fully read (request) but more data is expected for requestId [" - + requestId - + "], action [" - + action - + "]; resetting", - e - ); - } - } - - /** - * Checks if the stream is fully consumed and throws the exceptions if that is not the case. 
- * @param requestId request identifier - * @param action action name - * @param stream stream - * @throws IOException IOException - */ - private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { - // in case we throw an exception, i.e. when the limit is hit, we don't want to verify - final int nextByte = stream.read(); - - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (request) for requestId [" - + requestId - + "], action [" - + action - + "], available [" - + stream.available() - + "]; resetting" - ); - } - } - - /** - * Checks if the stream is fully consumed and throws the exceptions if that is not the case. - * @param requestId request identifier - * @param handler response handler - * @param stream stream - * @param error "true" if response represents error, "false" otherwise - * @throws IOException IOException - */ - private void checkStreamIsFullyConsumed( - final long requestId, - final TransportResponseHandler handler, - final StreamInput stream, - final boolean error - ) throws IOException { - if (stream != EMPTY_STREAM_INPUT) { - // Check the entire message has been read - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (response) for requestId [" - + requestId - + "], handler [" - + handler - + "], error [" - + error - + "]; resetting" - ); - } - } - } - - private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); - } - } - - private void handleResponse( - final long requestId, - InetSocketAddress remoteAddress, - final StreamInput stream, - final TransportResponseHandler handler - ) { - final T response; - try { - response = handler.read(stream); - response.remoteAddress(new TransportAddress(remoteAddress)); - checkStreamIsFullyConsumed(requestId, handler, stream, false); - } catch (Exception e) { - final Exception serializationException = new TransportSerializationException( - "Failed to deserialize response from handler [" + handler + "]", - e - ); - logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); - handleException(handler, serializationException); - return; - } - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - doHandleResponse(handler, response); - } else { - threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); - } + messageReceivedFromPipeline(channel, message, startTime); } - private void doHandleResponse(TransportResponseHandler handler, T response) { - try { - handler.handleResponse(response); - } catch (Exception e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - } - - private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { - Exception error; - try { - error = stream.readException(); - checkStreamIsFullyConsumed(requestId, handler, stream, true); - } catch (Exception e) 
{ - error = new TransportSerializationException( - "Failed to deserialize exception response from stream for handler [" + handler + "]", - e - ); - } - handleException(handler, error); - } - - private void handleException(final TransportResponseHandler handler, Throwable error) { - if (!(error instanceof RemoteTransportException)) { - error = new RemoteTransportException(error.getMessage(), error); - } - final RemoteTransportException rtx = (RemoteTransportException) error; - threadPool.executor(handler.executor()).execute(() -> { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); - } - }); - } - - private StreamInput namedWriteableStream(StreamInput delegate) { - return new NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); - } - - static void assertRemoteVersion(StreamInput in, Version version) { - assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; - } - - /** - * Internal request handler - * - * @opensearch.internal - */ - private static class RequestHandler extends AbstractRunnable { - private final RequestHandlerRegistry reg; - private final T request; - private final TransportChannel transportChannel; - - RequestHandler(RequestHandlerRegistry reg, T request, TransportChannel transportChannel) { - this.reg = reg; - this.request = request; - this.transportChannel = transportChannel; - } - - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); + private void messageReceivedFromPipeline(TcpChannel channel, ProtocolInboundMessage message, long startTime) throws IOException { + ProtocolMessageHandler protocolMessageHandler = protocolMessageHandlers.get(message.getProtocol()); + if (protocolMessageHandler == null) { + throw new IllegalStateException("No protocol message handler found for protocol: " + message.getProtocol()); } + protocolMessageHandler.messageReceived(channel, message, startTime, slowLogThresholdMs, messageListener); } } diff --git a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java index 71c4d6973505d..5c68257557061 100644 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/InboundMessage.java @@ -32,105 +32,77 @@ package org.opensearch.transport; -import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.annotation.DeprecatedApi; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.IOException; /** * Inbound data as a message - * + * This api is deprecated, please use {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage} instead. 
* @opensearch.api */ -@PublicApi(since = "1.0.0") -public class InboundMessage implements Releasable { +@DeprecatedApi(since = "2.14.0") +public class InboundMessage implements Releasable, ProtocolInboundMessage { - private final Header header; - private final ReleasableBytesReference content; - private final Exception exception; - private final boolean isPing; - private Releasable breakerRelease; - private StreamInput streamInput; + private final NativeInboundMessage nativeInboundMessage; public InboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { - this.header = header; - this.content = content; - this.breakerRelease = breakerRelease; - this.exception = null; - this.isPing = false; + this.nativeInboundMessage = new NativeInboundMessage(header, content, breakerRelease); } public InboundMessage(Header header, Exception exception) { - this.header = header; - this.content = null; - this.breakerRelease = null; - this.exception = exception; - this.isPing = false; + this.nativeInboundMessage = new NativeInboundMessage(header, exception); } public InboundMessage(Header header, boolean isPing) { - this.header = header; - this.content = null; - this.breakerRelease = null; - this.exception = null; - this.isPing = isPing; + this.nativeInboundMessage = new NativeInboundMessage(header, isPing); } public Header getHeader() { - return header; + return this.nativeInboundMessage.getHeader(); } public int getContentLength() { - if (content == null) { - return 0; - } else { - return content.length(); - } + return this.nativeInboundMessage.getContentLength(); } public Exception getException() { - return exception; + return this.nativeInboundMessage.getException(); } public boolean isPing() { - return isPing; + return this.nativeInboundMessage.isPing(); } public boolean isShortCircuit() { - return exception != null; + return this.nativeInboundMessage.getException() != null; } public Releasable takeBreakerReleaseControl() { - final Releasable toReturn = breakerRelease; - breakerRelease = null; - if (toReturn != null) { - return toReturn; - } else { - return () -> {}; - } + return this.nativeInboundMessage.takeBreakerReleaseControl(); } public StreamInput openOrGetStreamInput() throws IOException { - assert isPing == false && content != null; - if (streamInput == null) { - streamInput = content.streamInput(); - streamInput.setVersion(header.getVersion()); - } - return streamInput; + return this.nativeInboundMessage.openOrGetStreamInput(); } @Override public void close() { - IOUtils.closeWhileHandlingException(streamInput); - Releasables.closeWhileHandlingException(content, breakerRelease); + this.nativeInboundMessage.close(); } @Override public String toString() { - return "InboundMessage{" + header + "}"; + return this.nativeInboundMessage.toString(); } + + @Override + public String getProtocol() { + return this.nativeInboundMessage.getProtocol(); + } + } diff --git a/server/src/main/java/org/opensearch/transport/InboundPipeline.java b/server/src/main/java/org/opensearch/transport/InboundPipeline.java index dd4690e5e6abf..5cee3bb975223 100644 --- a/server/src/main/java/org/opensearch/transport/InboundPipeline.java +++ b/server/src/main/java/org/opensearch/transport/InboundPipeline.java @@ -38,11 +38,11 @@ import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.CompositeBytesReference; +import 
org.opensearch.transport.nativeprotocol.NativeInboundBytesHandler; import java.io.IOException; import java.util.ArrayDeque; -import java.util.ArrayList; +import java.util.List; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongSupplier; @@ -55,17 +55,16 @@ */ public class InboundPipeline implements Releasable { - private static final ThreadLocal> fragmentList = ThreadLocal.withInitial(ArrayList::new); - private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); - private final LongSupplier relativeTimeInMillis; private final StatsTracker statsTracker; private final InboundDecoder decoder; private final InboundAggregator aggregator; - private final BiConsumer messageHandler; private Exception uncaughtException; private final ArrayDeque pending = new ArrayDeque<>(2); private boolean isClosed = false; + private final BiConsumer messageHandler; + private final List protocolBytesHandlers; + private InboundBytesHandler currentHandler; public InboundPipeline( Version version, @@ -74,7 +73,7 @@ public InboundPipeline( LongSupplier relativeTimeInMillis, Supplier circuitBreaker, Function> registryFunction, - BiConsumer messageHandler + BiConsumer messageHandler ) { this( statsTracker, @@ -90,18 +89,23 @@ public InboundPipeline( LongSupplier relativeTimeInMillis, InboundDecoder decoder, InboundAggregator aggregator, - BiConsumer messageHandler + BiConsumer messageHandler ) { this.relativeTimeInMillis = relativeTimeInMillis; this.statsTracker = statsTracker; this.decoder = decoder; this.aggregator = aggregator; + this.protocolBytesHandlers = List.of(new NativeInboundBytesHandler(pending, decoder, aggregator, statsTracker)); this.messageHandler = messageHandler; } @Override public void close() { isClosed = true; + if (currentHandler != null) { + currentHandler.close(); + currentHandler = null; + } Releasables.closeWhileHandlingException(decoder, aggregator); Releasables.closeWhileHandlingException(pending); pending.clear(); @@ -124,95 +128,21 @@ public void doHandleBytes(TcpChannel channel, ReleasableBytesReference reference statsTracker.markBytesRead(reference.length()); pending.add(reference.retain()); - final ArrayList fragments = fragmentList.get(); - boolean continueHandling = true; - - while (continueHandling && isClosed == false) { - boolean continueDecoding = true; - while (continueDecoding && pending.isEmpty() == false) { - try (ReleasableBytesReference toDecode = getPendingBytes()) { - final int bytesDecoded = decoder.decode(toDecode, fragments::add); - if (bytesDecoded != 0) { - releasePendingBytes(bytesDecoded); - if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { - continueDecoding = false; - } - } else { - continueDecoding = false; - } - } - } - - if (fragments.isEmpty()) { - continueHandling = false; - } else { - try { - forwardFragments(channel, fragments); - } finally { - for (Object fragment : fragments) { - if (fragment instanceof ReleasableBytesReference) { - ((ReleasableBytesReference) fragment).close(); - } - } - fragments.clear(); - } - } - } - } - - private void forwardFragments(TcpChannel channel, ArrayList fragments) throws IOException { - for (Object fragment : fragments) { - if (fragment instanceof Header) { - assert aggregator.isAggregating() == false; - aggregator.headerReceived((Header) fragment); - } else if (fragment == InboundDecoder.PING) { - assert aggregator.isAggregating() == false; - messageHandler.accept(channel, PING_MESSAGE); - } else if (fragment 
== InboundDecoder.END_CONTENT) { - assert aggregator.isAggregating(); - try (InboundMessage aggregated = aggregator.finishAggregation()) { - statsTracker.markMessageReceived(); - messageHandler.accept(channel, aggregated); + // If we don't have a current handler, we should try to find one based on the protocol of the incoming bytes. + if (currentHandler == null) { + for (InboundBytesHandler handler : protocolBytesHandlers) { + if (handler.canHandleBytes(reference)) { + currentHandler = handler; + break; } - } else { - assert aggregator.isAggregating(); - assert fragment instanceof ReleasableBytesReference; - aggregator.aggregate((ReleasableBytesReference) fragment); } } - } - private boolean endOfMessage(Object fragment) { - return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; - } - - private ReleasableBytesReference getPendingBytes() { - if (pending.size() == 1) { - return pending.peekFirst().retain(); + // If we have a current handler determined based on protocol, we should continue to use it for the fragmented bytes. + if (currentHandler != null) { + currentHandler.doHandleBytes(channel, reference, messageHandler); } else { - final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; - int index = 0; - for (ReleasableBytesReference pendingReference : pending) { - bytesReferences[index] = pendingReference.retain(); - ++index; - } - final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); - return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); - } - } - - private void releasePendingBytes(int bytesConsumed) { - int bytesToRelease = bytesConsumed; - while (bytesToRelease != 0) { - try (ReleasableBytesReference reference = pending.pollFirst()) { - assert reference != null; - if (bytesToRelease < reference.length()) { - pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); - bytesToRelease -= bytesToRelease; - } else { - bytesToRelease -= reference.length(); - } - } + throw new IllegalStateException("No bytes handler found for the incoming transport protocol"); } } } diff --git a/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java new file mode 100644 index 0000000000000..861b95a8098f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java @@ -0,0 +1,494 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.ByteBufferStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; +import org.opensearch.threadpool.ThreadPool; + +import java.io.EOFException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Native handler for inbound data + * + * @opensearch.internal + */ +public class NativeMessageHandler implements ProtocolMessageHandler { + + private static final Logger logger = LogManager.getLogger(NativeMessageHandler.class); + + private final ThreadPool threadPool; + private final OutboundHandler outboundHandler; + private final NamedWriteableRegistry namedWriteableRegistry; + private final TransportHandshaker handshaker; + private final TransportKeepAlive keepAlive; + private final Transport.ResponseHandlers responseHandlers; + private final Transport.RequestHandlers requestHandlers; + + private final Tracer tracer; + + NativeMessageHandler( + ThreadPool threadPool, + OutboundHandler outboundHandler, + NamedWriteableRegistry namedWriteableRegistry, + TransportHandshaker handshaker, + Transport.RequestHandlers requestHandlers, + Transport.ResponseHandlers responseHandlers, + Tracer tracer, + TransportKeepAlive keepAlive + ) { + this.threadPool = threadPool; + this.outboundHandler = outboundHandler; + this.namedWriteableRegistry = namedWriteableRegistry; + this.handshaker = handshaker; + this.requestHandlers = requestHandlers; + this.responseHandlers = responseHandlers; + this.tracer = tracer; + this.keepAlive = keepAlive; + } + + // Empty stream constant to avoid instantiating a new stream for empty messages. 
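// Illustrative note, not part of this patch: sharing one EMPTY_STREAM_INPUT across messages is
// safe because reading an empty buffer only ever observes end-of-stream, e.g. a hypothetical check:
//   assert new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)).read() == -1;
// so no per-message state is carried between requests that reuse the constant declared below.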
+ private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); + + @Override + public void messageReceived( + TcpChannel channel, + ProtocolInboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException { + InboundMessage inboundMessage = (InboundMessage) message; + TransportLogger.logInboundMessage(channel, inboundMessage); + if (inboundMessage.isPing()) { + keepAlive.receiveKeepAlive(channel); + } else { + handleMessage(channel, inboundMessage, startTime, slowLogThresholdMs, messageListener); + } + } + + private void handleMessage( + TcpChannel channel, + InboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException { + final InetSocketAddress remoteAddress = channel.getRemoteAddress(); + final Header header = message.getHeader(); + assert header.needsToReadVariableHeader() == false; + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + // Place the context with the headers from the message + threadContext.setHeaders(header.getHeaders()); + threadContext.putTransient("_remote_address", remoteAddress); + if (header.isRequest()) { + handleRequest(channel, header, message, messageListener); + } else { + // Responses do not support short circuiting currently + assert message.isShortCircuit() == false; + final TransportResponseHandler handler; + long requestId = header.getRequestId(); + if (header.isHandshake()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + TransportResponseHandler theHandler = responseHandlers.onResponseReceived( + requestId, + messageListener + ); + if (theHandler == null && header.isError()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + handler = theHandler; + } + } + // ignore if its null, the service logs it + if (handler != null) { + final StreamInput streamInput; + if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { + streamInput = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(streamInput, header.getVersion()); + if (header.isError()) { + handlerResponseError(requestId, streamInput, handler); + } else { + handleResponse(requestId, remoteAddress, streamInput, handler); + } + } else { + assert header.isError() == false; + handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); + } + } + + } + } finally { + final long took = threadPool.relativeTimeInMillis() - startTime; + final long logThreshold = slowLogThresholdMs; + if (logThreshold > 0 && took > logThreshold) { + logger.warn( + "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", + message, + took, + logThreshold + ); + } + } + } + + private Map> extractHeaders(Map headers) { + return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); + } + + private void handleRequest( + TcpChannel channel, + Header header, + InboundMessage message, + TransportMessageListener messageListener + ) throws IOException { + final String action = header.getActionName(); + final long requestId = header.getRequestId(); + final Version version = header.getVersion(); + final Map> headers = extractHeaders(header.getHeaders().v1()); + Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers); 
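// Illustrative note, not part of this patch: the try-with-resources below only closes the
// SpanScope (thread-local activation), not the span itself. The catch block at the end of this
// method ends the span on failure; on the success path, ownership appears to pass to
// TraceableTcpTransportChannel, which ends the span once the response is sent.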
+ try (SpanScope spanScope = tracer.withSpanInScope(span)) { + if (header.isHandshake()) { + messageListener.onRequestReceived(requestId, action); + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + handshaker.handleHandshake(traceableTransportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, traceableTransportChannel, e); + } else { + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); + } + } + } else { + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + sendErrorResponse(action, traceableTransportChannel, message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, traceableTransportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), traceableTransportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, traceableTransportChannel, e); + } + } + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } + } + + /** + * Creates new request instance out of input stream. Throws IllegalStateException if the end of + * the stream was reached before the request is fully deserialized from the stream. 
+ * @param transport request type + * @param requestId request identifier + * @param action action name + * @param stream stream + * @param reg request handler registry + * @return new request instance + * @throws IOException IOException + * @throws IllegalStateException IllegalStateException + */ + private T newRequest( + final long requestId, + final String action, + final StreamInput stream, + final RequestHandlerRegistry reg + ) throws IOException { + try { + return reg.newRequest(stream); + } catch (final EOFException e) { + // Another favor of (de)serialization issues is when stream contains less bytes than + // the request handler needs to deserialize the payload. + throw new IllegalStateException( + "Message fully read (request) but more data is expected for requestId [" + + requestId + + "], action [" + + action + + "]; resetting", + e + ); + } + } + + /** + * Checks if the stream is fully consumed and throws the exceptions if that is not the case. + * @param requestId request identifier + * @param action action name + * @param stream stream + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + final int nextByte = stream.read(); + + // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (request) for requestId [" + + requestId + + "], action [" + + action + + "], available [" + + stream.available() + + "]; resetting" + ); + } + } + + /** + * Checks if the stream is fully consumed and throws the exceptions if that is not the case. 
+ * @param requestId request identifier + * @param handler response handler + * @param stream stream + * @param error "true" if response represents error, "false" otherwise + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed( + final long requestId, + final TransportResponseHandler handler, + final StreamInput stream, + final boolean error + ) throws IOException { + if (stream != EMPTY_STREAM_INPUT) { + // Check the entire message has been read + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (response) for requestId [" + + requestId + + "], handler [" + + handler + + "], error [" + + error + + "]; resetting" + ); + } + } + } + + private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { + try { + transportChannel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); + } + } + + private void handleResponse( + final long requestId, + InetSocketAddress remoteAddress, + final StreamInput stream, + final TransportResponseHandler handler + ) { + final T response; + try { + response = handler.read(stream); + response.remoteAddress(new TransportAddress(remoteAddress)); + checkStreamIsFullyConsumed(requestId, handler, stream, false); + } catch (Exception e) { + final Exception serializationException = new TransportSerializationException( + "Failed to deserialize response from handler [" + handler + "]", + e + ); + logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); + handleException(handler, serializationException); + return; + } + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + doHandleResponse(handler, response); + } else { + threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); + } + } + + private void doHandleResponse(TransportResponseHandler handler, T response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException(e)); + } + } + + private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { + Exception error; + try { + error = stream.readException(); + checkStreamIsFullyConsumed(requestId, handler, stream, true); + } catch (Exception e) { + error = new TransportSerializationException( + "Failed to deserialize exception response from stream for handler [" + handler + "]", + e + ); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + } + }); + } + + private StreamInput namedWriteableStream(StreamInput delegate) { + return new NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); + } + + static void 
assertRemoteVersion(StreamInput in, Version version) { + assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; + } + + /** + * Internal request handler + * + * @opensearch.internal + */ + private static class RequestHandler extends AbstractRunnable { + private final RequestHandlerRegistry reg; + private final T request; + private final TransportChannel transportChannel; + + RequestHandler(RequestHandlerRegistry reg, T request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } + } + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java b/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java new file mode 100644 index 0000000000000..43c2d5ffe4c96 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.annotation.PublicApi; + +/** + * Base class for inbound data as a message. + * Different implementations are used for different protocols. + * + * @opensearch.internal + */ +@PublicApi(since = "2.14.0") +public interface ProtocolInboundMessage { + + /** + * @return the protocol used to encode this message + */ + public String getProtocol(); + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java b/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java new file mode 100644 index 0000000000000..714d91d1e74c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import java.io.IOException; + +/** + * Interface for message handlers based on transport protocol. + * + * @opensearch.internal + */ +public interface ProtocolMessageHandler { + + public void messageReceived( + TcpChannel channel, + ProtocolInboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7d45152089f37..e32bba5e836d3 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -762,12 +762,24 @@ protected void serverAcceptedChannel(TcpChannel channel) { protected abstract void stopInternal(); /** + * @deprecated use {@link #inboundMessage(TcpChannel, ProtocolInboundMessage)} * Handles inbound message that has been decoded. 
diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7d45152089f37..e32bba5e836d3 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -762,12 +762,24 @@ protected void serverAcceptedChannel(TcpChannel channel) { protected abstract void stopInternal(); /** + * @deprecated use {@link #inboundMessage(TcpChannel, ProtocolInboundMessage)} * Handles inbound message that has been decoded. * * @param channel the channel the message is from + * @param message the message + */ + @Deprecated(since = "2.14.0", forRemoval = true) public void inboundMessage(TcpChannel channel, InboundMessage message) { + inboundMessage(channel, (ProtocolInboundMessage) message); + } + + /** + * Handles inbound message that has been decoded. + * + * @param channel the channel the message is from + * @param message the message + */ + public void inboundMessage(TcpChannel channel, ProtocolInboundMessage message) { try { inboundHandler.inboundMessage(channel, message); } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java new file mode 100644 index 0000000000000..a8a4c0da7ec0f --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport.nativeprotocol; + +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.transport.Header; +import org.opensearch.transport.InboundAggregator; +import org.opensearch.transport.InboundBytesHandler; +import org.opensearch.transport.InboundDecoder; +import org.opensearch.transport.InboundMessage; +import org.opensearch.transport.ProtocolInboundMessage; +import org.opensearch.transport.StatsTracker; +import org.opensearch.transport.TcpChannel; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.function.BiConsumer; + +/** + * Handler for inbound bytes for the native protocol.
+ */ +public class NativeInboundBytesHandler implements InboundBytesHandler { + + private static final ThreadLocal<ArrayList<Object>> fragmentList = ThreadLocal.withInitial(ArrayList::new); + private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); + + private final ArrayDeque<ReleasableBytesReference> pending; + private final InboundDecoder decoder; + private final InboundAggregator aggregator; + private final StatsTracker statsTracker; + private boolean isClosed = false; + + public NativeInboundBytesHandler( + ArrayDeque<ReleasableBytesReference> pending, + InboundDecoder decoder, + InboundAggregator aggregator, + StatsTracker statsTracker + ) { + this.pending = pending; + this.decoder = decoder; + this.aggregator = aggregator; + this.statsTracker = statsTracker; + } + + @Override + public void close() { + isClosed = true; + } + + @Override + public boolean canHandleBytes(ReleasableBytesReference reference) { + return true; + } + + @Override + public void doHandleBytes( + TcpChannel channel, + ReleasableBytesReference reference, + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler + ) throws IOException { + final ArrayList<Object> fragments = fragmentList.get(); + boolean continueHandling = true; + + while (continueHandling && isClosed == false) { + boolean continueDecoding = true; + while (continueDecoding && pending.isEmpty() == false) { + try (ReleasableBytesReference toDecode = getPendingBytes()) { + final int bytesDecoded = decoder.decode(toDecode, fragments::add); + if (bytesDecoded != 0) { + releasePendingBytes(bytesDecoded); + if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { + continueDecoding = false; + } + } else { + continueDecoding = false; + } + } + } + + if (fragments.isEmpty()) { + continueHandling = false; + } else { + try { + forwardFragments(channel, fragments, messageHandler); + } finally { + for (Object fragment : fragments) { + if (fragment instanceof ReleasableBytesReference) { + ((ReleasableBytesReference) fragment).close(); + } + } + fragments.clear(); + } + } + } + } + + private ReleasableBytesReference getPendingBytes() { + if (pending.size() == 1) { + return pending.peekFirst().retain(); + } else { + final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; + int index = 0; + for (ReleasableBytesReference pendingReference : pending) { + bytesReferences[index] = pendingReference.retain(); + ++index; + } + final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); + return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); + } + } + + private void releasePendingBytes(int bytesConsumed) { + int bytesToRelease = bytesConsumed; + while (bytesToRelease != 0) { + try (ReleasableBytesReference reference = pending.pollFirst()) { + assert reference != null; + if (bytesToRelease < reference.length()) { + pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); + bytesToRelease -= bytesToRelease; + } else { + bytesToRelease -= reference.length(); + } + } + } + } + + private boolean endOfMessage(Object fragment) { + return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; + } + + private void forwardFragments( + TcpChannel channel, + ArrayList<Object> fragments, + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler + ) throws IOException { + for (Object fragment : fragments) { + if (fragment instanceof Header) { + assert aggregator.isAggregating() == false; + aggregator.headerReceived((Header) fragment); + } else if (fragment ==
InboundDecoder.PING) { + assert aggregator.isAggregating() == false; + messageHandler.accept(channel, PING_MESSAGE); + } else if (fragment == InboundDecoder.END_CONTENT) { + assert aggregator.isAggregating(); + try (InboundMessage aggregated = aggregator.finishAggregation()) { + statsTracker.markMessageReceived(); + messageHandler.accept(channel, aggregated); + } + } else { + assert aggregator.isAggregating(); + assert fragment instanceof ReleasableBytesReference; + aggregator.aggregate((ReleasableBytesReference) fragment); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java new file mode 100644 index 0000000000000..1143f129b6319 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java @@ -0,0 +1,149 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.transport.nativeprotocol; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.transport.Header; +import org.opensearch.transport.ProtocolInboundMessage; + +import java.io.IOException; + +/** + * Inbound data as a message + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class NativeInboundMessage implements Releasable, ProtocolInboundMessage { + + /** + * The protocol used to encode this message + */ + public static String NATIVE_PROTOCOL = "native"; + + private final Header header; + private final ReleasableBytesReference content; + private final Exception exception; + private final boolean isPing; + private Releasable breakerRelease; + private StreamInput streamInput; + + public NativeInboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { + this.header = header; + this.content = content; + this.breakerRelease = breakerRelease; + this.exception = null; + this.isPing = false; + } + + public NativeInboundMessage(Header header, Exception exception) { + this.header = header; + this.content = null; + this.breakerRelease = null; + this.exception = exception; + this.isPing = false; + } + + public NativeInboundMessage(Header header, boolean isPing) { + this.header = header; + this.content = null; + this.breakerRelease = null; + this.exception = null; + this.isPing = isPing; + } + + @Override + public String getProtocol() { + return NATIVE_PROTOCOL; + } + + public Header getHeader() { + return header; + } + + public int getContentLength() { + if (content == null) { + return 0; + } else { + return content.length(); + } + } + + public Exception getException() { + return exception; + } + + public boolean isPing() { + return isPing; + } + + public boolean isShortCircuit() { + return exception != null; + } + + public Releasable takeBreakerReleaseControl() { + final Releasable toReturn = breakerRelease; + breakerRelease = null; + if (toReturn != null) { + return toReturn; + } else { + return () -> {}; + } + } + + public StreamInput openOrGetStreamInput() throws IOException { + assert isPing == false && content != null; + if (streamInput == null) { + streamInput = content.streamInput(); + streamInput.setVersion(header.getVersion()); + } + return streamInput; + } + + @Override + public void close() { + IOUtils.closeWhileHandlingException(streamInput); + Releasables.closeWhileHandlingException(content, breakerRelease); + } + + @Override + public String toString() { + return "InboundMessage{" + header + "}"; + } + +}
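NativeInboundMessage is the native-protocol implementation of ProtocolInboundMessage, and its NATIVE_PROTOCOL tag is what lets a dispatcher select the matching ProtocolMessageHandler. A minimal sketch of such a lookup, assuming a simple map from protocol tag to handler; the actual dispatch code is not part of this diff:

import java.util.Map;

import org.opensearch.transport.ProtocolInboundMessage;
import org.opensearch.transport.ProtocolMessageHandler;

// Sketch: route a decoded message to the handler registered for its protocol tag.
final class ProtocolDispatch {
    static ProtocolMessageHandler handlerFor(Map<String, ProtocolMessageHandler> handlers, ProtocolInboundMessage message) {
        final ProtocolMessageHandler handler = handlers.get(message.getProtocol());
        if (handler == null) {
            throw new IllegalStateException("no handler for protocol [" + message.getProtocol() + "]");
        }
        return handler;
    }
}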
diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java new file mode 100644 index 0000000000000..84f6d7d0ec5c2 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Native transport protocol package. */ +package org.opensearch.transport.nativeprotocol; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index a8282faaddced..e8273d294f24f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -50,6 +51,8 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -61,6 +64,9 @@ import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; public class FilterAllocationDeciderTests extends OpenSearchAllocationTestCase { @@ -406,4 +412,57 @@ public void testWildcardIPFilter() { "test ip validation" ); } + + public void testMixedModeRemoteStoreAllocation() { + // For replica creation on existing indices in a mixed-mode cluster migrating to remote store, + // the filter allocation decider is not considered + FeatureFlags.initializeFeatureFlags(Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build()); + ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings initialSettings = Settings.builder() + .put("cluster.routing.allocation.exclude._id", "node2") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .build(); + + FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(initialSettings, clusterSettings); + AllocationDeciders allocationDeciders = new AllocationDeciders( + Arrays.asList( + filterAllocationDecider, + new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), + new ReplicaAfterPrimaryActiveAllocationDecider() + ) + ); + AllocationService service = new AllocationService( + allocationDeciders, + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + ClusterState state = createInitialClusterState(service, Settings.EMPTY,
Settings.EMPTY); + RoutingTable routingTable = state.routingTable(); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + allocation.debugDecision(true); + ShardRouting sr = ShardRouting.newUnassigned( + routingTable.index("sourceIndex").shard(0).shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "") + ); + Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate( + sr, + state.getRoutingNodes().node("node2"), + allocation + ); + assertEquals(decision.toString(), Type.YES, decision.type()); + + sr = ShardRouting.newUnassigned( + routingTable.index("sourceIndex").shard(0).shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "") + ); + decision = (Decision.Single) filterAllocationDecider.canAllocate(sr, state.getRoutingNodes().node("node2"), allocation); + assertEquals(decision.toString(), Type.NO, decision.type()); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index 33008bee1a392..fe5635063f783 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; import org.opensearch.index.remote.RemoteStoreEnums.DataType; @@ -24,7 +25,10 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.parseString; public class RemoteStoreEnumsTests extends OpenSearchTestCase { @@ -136,6 +140,370 @@ public void testGeneratePathForFixedType() { assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); } + public void testGeneratePathForHashedPrefixType() { + BlobPath blobPath = new BlobPath(); + List<String> pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path +
BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); + + // Translog Lock files - This is a negative case where the assertion will trip. + dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + 
.dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); + } + + public void testGeneratePathForHashedInfixType() { + BlobPath blobPath = new BlobPath(); + List<String> pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList); + basePath = basePath.length() == 0 ? basePath : basePath.substring(0, basePath.length() - 1); + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A); + String expected = derivePath(basePath, pathInput); + String actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected =
"xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Translog Lock files - This is a negative case where the assertion will trip. + dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_INFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_INFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + 
pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + } + + private String derivePath(String basePath, PathInput pathInput) { + return "".equals(basePath) + ? String.join( + SEPARATOR, + FNV_1A.hash(pathInput), + pathInput.indexUUID(), + pathInput.shardId(), + pathInput.dataCategory().getName(), + pathInput.dataType().getName() + ) + : String.join( + SEPARATOR, + basePath, + FNV_1A.hash(pathInput), + pathInput.indexUUID(), + pathInput.shardId(), + pathInput.dataCategory().getName(), + pathInput.dataType().getName() + ); + } + private List<String> getPathList() { List<String> pathList = new ArrayList<>(); int length = randomIntBetween(0, 5); @@ -152,5 +520,4 @@ private String getPath(List<String> pathList) { } return p + SEPARATOR; } - } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java new file mode 100644 index 0000000000000..9d4b41f5c395f --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; + +public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { + + public void testGetMinVersionOlder() { + Settings settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + } + + public void testGetMinVersionNewer() { + PathType pathType = randomFrom(PathType.values()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(pathType, resolver.get().getType()); + if (pathType.requiresHashAlgorithm()) { + assertNotNull(resolver.get().getHashAlgorithm()); + } else { + assertNull(resolver.get().getHashAlgorithm()); + } + + } + +}
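The two resolver tests above pin the version gate: when the cluster's minimum node version is 2.13, the configured path type is ignored and FIXED is returned with no hash algorithm; on a current-version cluster the configured type, plus a hash algorithm where the type requires one, is honored. Condensed, the gating reads roughly as follows; this assumes Version.V_2_14_0 is the cutoff and is an illustration, not the resolver's actual code:

import org.opensearch.Version;
import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
import org.opensearch.index.remote.RemoteStoreEnums.PathType;

// Sketch of version-gated path-type resolution (cutoff version assumed).
final class PathTypeGate {
    static PathType resolve(PathType configured, Version minNodeVersion) {
        // Older nodes in the cluster cannot understand hashed paths, so fall back.
        return minNodeVersion.onOrAfter(Version.V_2_14_0) ? configured : PathType.FIXED;
    }

    static PathHashAlgorithm algorithmFor(PathType type) {
        return type.requiresHashAlgorithm() ? PathHashAlgorithm.FNV_1A : null;
    }
}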
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index d3c7d754d6b61..34074861f2764 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -17,8 +17,10 @@ import java.util.ArrayList; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -179,4 +181,32 @@ public void testVerifyMultipleWriters_Translog() throws InterruptedException { ); } + public void testLongToBase64() { + Map<Long, String> longToExpectedBase64String = Map.of( + -5537941589147079860L, + "syVHd0gGq0w", + -5878421770170594047L, + "rmumi5UPDQE", + -5147010836697060622L, + "uJIk6f-V6vI", + 937096430362711837L, + "DQE8PQwOVx0", + 8422273604115462710L, + "dOHtOEZzejY", + -2528761975013221124L, + "3OgIYbXSXPw", + -5512387536280560513L, + "s4AQvdu03H8", + -5749656451579835857L, + "sDUd65cNCi8", + 5569654857969679538L, + "TUtjlYLPvLI", + -1563884000447039930L, + "6kv3yZNv9kY" + ); + for (Map.Entry<Long, String> entry : longToExpectedBase64String.entrySet()) { + assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + assertEquals(11, entry.getValue().length()); + } + } } diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 57c126b85ff70..29ffb94ce8bf4 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -43,6 +43,8 @@ import java.util.List; import java.util.Map; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -56,14 +58,16 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException { - String indexUUID = client().admin() - .indices() - .prepareGetSettings(remoteStoreIndex) - .get() - .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository remoteStorerepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository); - BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreIndex, + remoteStorerepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { return Arrays.stream(lockDirectory.listAll())
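This refactor matters because the hand-built path basePath.add(indexUUID).add("0").add("segments").add("lock_files") is only correct for PathType.FIXED; once an index is created with a hashed path type, a hash component enters the path and the hardcoded lookup misses the data. The getShardLevelBlobPath helper (added to OpenSearchTestCase later in this change) derives the path from the index's remote-store path metadata instead. For the same shard the layouts differ roughly as sketched below; the hash placeholder is illustrative:

// PathType.FIXED:
//   <base>/<indexUUID>/0/segments/lock_files/
// PathType.HASHED_PREFIX with FNV_1A:
//   <fnv1a-hash>/<base>/<indexUUID>/0/segments/lock_files/
BlobPath shardPath = getShardLevelBlobPath(client(), remoteStoreIndex, repository.basePath(), "0", SEGMENTS, LOCK_FILES);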
diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index e002297911788..0d171e17e70e1 100644 --- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -275,11 +275,11 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws // response so we must just close the connection on an error. To avoid the failure disappearing into a black hole we at least log // it. - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(NativeMessageHandler.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "expected message", - InboundHandler.class.getCanonicalName(), + NativeMessageHandler.class.getCanonicalName(), Level.WARN, "could not send error response to handshake" ) @@ -308,11 +308,11 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws } public void testLogsSlowInboundProcessing() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(NativeMessageHandler.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "expected message", - InboundHandler.class.getCanonicalName(), + NativeMessageHandler.class.getCanonicalName(), Level.WARN, "handling inbound transport message " ) diff --git a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java index ae4b537223394..2dfe8a0dd8590 100644 --- a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java @@ -72,24 +72,25 @@ public void testPipelineHandling() throws IOException { final List<Tuple<MessageData, Exception>> expected = new ArrayList<>(); final List<Tuple<MessageData, Exception>> actual = new ArrayList<>(); final List<ReleasableBytesReference> toRelease = new ArrayList<>(); - final BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> { + final BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> { try { - final Header header = m.getHeader(); + InboundMessage message = (InboundMessage) m; + final Header header = message.getHeader(); final MessageData actualData; final Version version = header.getVersion(); final boolean isRequest = header.isRequest(); final long requestId = header.getRequestId(); final boolean isCompressed = header.isCompressed(); - if (m.isShortCircuit()) { + if (message.isShortCircuit()) { actualData = new MessageData(version, requestId, isRequest, isCompressed, header.getActionName(), null); } else if (isRequest) { - final TestRequest request = new TestRequest(m.openOrGetStreamInput()); + final TestRequest request = new TestRequest(message.openOrGetStreamInput()); actualData = new MessageData(version, requestId, isRequest, isCompressed, header.getActionName(), request.value); } else { - final TestResponse response = new TestResponse(m.openOrGetStreamInput()); + final TestResponse response = new TestResponse(message.openOrGetStreamInput()); actualData = new MessageData(version, requestId, isRequest, isCompressed, null, response.value); } - actual.add(new Tuple<>(actualData, m.getException())); + actual.add(new Tuple<>(actualData,
message.getException())); } catch (IOException e) { throw new AssertionError(e); } @@ -214,7 +215,7 @@ } public void testDecodeExceptionIsPropagated() throws IOException { - BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {}; + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {}; final StatsTracker statsTracker = new StatsTracker(); final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime()); final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); @@ -268,7 +269,7 @@ } public void testEnsureBodyIsNotPrematurelyReleased() throws IOException { - BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {}; + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {}; final StatsTracker statsTracker = new StatsTracker(); final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime()); final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); diff --git a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java index ff99435f765d8..36ba409a2de03 100644 --- a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java @@ -97,8 +97,9 @@ public void setUp() throws Exception { final InboundAggregator aggregator = new InboundAggregator(breaker, (Predicate<String>) action -> true); pipeline = new InboundPipeline(statsTracker, millisSupplier, decoder, aggregator, (c, m) -> { try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { - Streams.copy(m.openOrGetStreamInput(), streamOutput); - message.set(new Tuple<>(m.getHeader(), streamOutput.bytes())); + InboundMessage m1 = (InboundMessage) m; + Streams.copy(m1.openOrGetStreamInput(), streamOutput); + message.set(new Tuple<>(m1.getHeader(), streamOutput.bytes())); } catch (IOException e) { throw new AssertionError(e); } diff --git a/settings.gradle b/settings.gradle index 8fbf32504215b..ccc239ff17062 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.16.2" + id "com.gradle.enterprise" version "3.17" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 38900ce51c3fa..4dd4c734a1701 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -389,7 +389,7 @@ protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperatio } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * supplied node id. * * @param shardId the shard id to use @@ -407,7 +407,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * supplied node id.
* * @param shardId the shard id to use @@ -441,7 +441,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. * * @param routing shard routing to use @@ -459,7 +459,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. * @param routing shard routing to use * @param indexMetadata indexMetadata for the shard, including any mapping @@ -498,7 +498,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. * @param routing shard routing to use * @param shardPath path to use for shard data diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 0ee889af5ce1a..ce76914882150 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -101,6 +101,8 @@ import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -559,19 +561,16 @@ protected void assertDocCount(String index, long count) { } protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName) throws IOException { - String indexUUID = client().admin() - .indices() - .prepareGetSettings(remoteStoreIndex) - .get() - .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); - return getLockFilesInRemoteStore(remoteStoreIndex, remoteStoreRepositoryName, indexUUID); - } - - protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName, String indexUUID) - throws IOException { final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName); - BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreIndex, + remoteStoreRepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath); try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { return lockDirectory.listAll(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 
f0f5576713042..c26c3f8d21380 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -135,6 +135,7 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; @@ -210,6 +211,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -377,6 +379,14 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { */ public static final String TESTS_CLUSTER_NAME = "tests.clustername"; + protected static final String REMOTE_BACKED_STORAGE_REPOSITORY_NAME = "test-remote-store-repo"; + + private Path remoteStoreRepositoryPath; + + private ReplicationType randomReplicationType; + + private String randomStorageType; + @BeforeClass public static void beforeClass() throws Exception { testClusterRule.beforeClass(); @@ -1894,11 +1904,19 @@ protected Settings nodeSettings(int nodeOrdinal) { builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } - // Randomly set a replication strategy for the node. Replication Strategy can still be manually overridden by subclass if needed. + // Randomly set a Replication Strategy and storage type for the node. Both Replication Strategy and Storage Type can still be + // manually overridden by subclass if needed. if (useRandomReplicationStrategy()) { - ReplicationType replicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT; - logger.info("Randomly using Replication Strategy as {}.", replicationType.toString()); - builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), replicationType); + if (randomReplicationType.equals(ReplicationType.SEGMENT) && randomStorageType.equals("REMOTE_STORE")) { + logger.info("Randomly using Replication Strategy as {} and Storage Type as {}.", randomReplicationType, randomStorageType); + if (remoteStoreRepositoryPath == null) { + remoteStoreRepositoryPath = randomRepoPath().toAbsolutePath(); + } + builder.put(remoteStoreClusterSettings(REMOTE_BACKED_STORAGE_REPOSITORY_NAME, remoteStoreRepositoryPath)); + } else { + logger.info("Randomly using Replication Strategy as {} and Storage Type as {}.", randomReplicationType, randomStorageType); + builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), randomReplicationType); + } } return builder.build(); } @@ -1951,6 +1969,14 @@ protected boolean ignoreExternalCluster() { } protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { + if (useRandomReplicationStrategy()) { + randomReplicationType = randomBoolean() ? 
ReplicationType.DOCUMENT : ReplicationType.SEGMENT; + if (randomReplicationType.equals(ReplicationType.SEGMENT)) { + randomStorageType = randomBoolean() ? "REMOTE_STORE" : "LOCAL"; + } else { + randomStorageType = "LOCAL"; + } + } String clusterAddresses = System.getProperty(TESTS_CLUSTER); if (Strings.hasLength(clusterAddresses) && ignoreExternalCluster() == false) { if (scope == Scope.TEST) { @@ -2593,6 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } + settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index f381ebdb64fc2..5a3f3b5a07a8d 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -63,14 +63,17 @@ import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.Version; import org.opensearch.bootstrap.BootstrapForTesting; +import org.opensearch.client.Client; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Numbers; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -120,6 +123,8 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.monitor.jvm.JvmInfo; @@ -1797,4 +1802,39 @@ protected static InetAddress randomIp(boolean v4) { throw new AssertionError(); } } + + public static BlobPath getShardLevelBlobPath( + Client client, + String remoteStoreIndex, + BlobPath basePath, + String shardId, + RemoteStoreEnums.DataCategory dataCategory, + RemoteStoreEnums.DataType dataType + ) { + String indexUUID = client.admin() + .indices() + .prepareGetSettings(remoteStoreIndex) + .get() + .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState(); + Map<String, String> remoteCustomData = state.metadata() + .index(remoteStoreIndex) + .getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + RemoteStoreEnums.PathType type = Objects.isNull(remoteCustomData) + ? RemoteStoreEnums.PathType.FIXED + : RemoteStoreEnums.PathType.valueOf(remoteCustomData.get(RemoteStoreEnums.PathType.NAME)); + RemoteStoreEnums.PathHashAlgorithm hashAlgorithm = Objects.nonNull(remoteCustomData) + ? remoteCustomData.containsKey(RemoteStoreEnums.PathHashAlgorithm.NAME) + ?
RemoteStoreEnums.PathHashAlgorithm.valueOf(remoteCustomData.get(RemoteStoreEnums.PathHashAlgorithm.NAME)) + : null + : null; + RemoteStorePathStrategy.PathInput pathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(basePath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + return type.path(pathInput, hashAlgorithm); + } }
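A closing note on testLongToBase64 above: the expected strings are consistent with encoding the long's eight big-endian bytes as unpadded URL-safe Base64, which always yields eleven characters (hence the assertEquals(11, ...) check). A standalone sketch of that encoding follows; it mirrors what RemoteStoreUtils.longToUrlBase64 is expected to produce but is not the production implementation:

import java.nio.ByteBuffer;
import java.util.Base64;

// Sketch: URL-safe, unpadded Base64 of a long's 8 big-endian bytes.
// 8 bytes -> 12 Base64 chars padded, 11 once the single trailing '=' is dropped.
final class LongBase64 {
    static String longToUrlBase64(long value) {
        final byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(value).array();
        return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
    }
}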