diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml deleted file mode 100644 index b2f22a90938cc..0000000000000 --- a/.github/workflows/check-compatibility.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -name: Check Compatibility - -on: - pull_request_target - -jobs: - check-compatibility: - if: github.repository == 'opensearch-project/OpenSearch' - permissions: - contents: read - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Increase swapfile - run: | - sudo swapoff -a - sudo fallocate -l 10G /swapfile - sudo chmod 600 /swapfile - sudo mkswap /swapfile - sudo swapon /swapfile - sudo swapon --show - - - name: Run compatibility task - run: ./gradlew checkCompatibility -i | tee $HOME/gradlew-check.out - - - name: Get results - run: | - echo '## Compatibility status:' > "${{ github.workspace }}/results.txt" - echo "Checks if related components are compatible with change $(git rev-parse --short HEAD)" >> "${{ github.workspace }}/results.txt" - echo "### Incompatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Incompatible component' $HOME/gradlew-check.out | sed -e 's/Incompatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - echo "### Skipped components" >> "${{ github.workspace }}/results.txt" && grep -e 'Skipped component' $HOME/gradlew-check.out | sed -e 's/Skipped component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - echo "### Compatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Compatible component' $HOME/gradlew-check.out | sed -e 's/Compatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - - - name: Upload results - uses: actions/upload-artifact@v4 - with: - name: results.txt - path: ${{ github.workspace }}/results.txt - - add-comment: - needs: [check-compatibility] - permissions: - pull-requests: write - runs-on: ubuntu-latest - steps: - - name: Download results - uses: actions/download-artifact@v4 - with: - name: results.txt - - - name: Find Comment - uses: peter-evans/find-comment@v3 - id: fc - with: - issue-number: ${{ github.event.number }} - comment-author: 'github-actions[bot]' - body-includes: 'Compatibility status:' - - - name: Add comment on the PR - uses: peter-evans/create-or-update-comment@v4 - with: - comment-id: ${{ steps.fc.outputs.comment-id }} - issue-number: ${{ github.event.number }} - body-path: results.txt - edit-mode: replace diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 0715c6de49ca4..964383078c38d 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -6,68 +6,23 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.0] ### Added - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions 
([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) -- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) -- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) -- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) -- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) -- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) ### Dependencies -- Bump `log4j-core` from 2.18.0 to 2.19.0 -- Bump `forbiddenapis` from 3.3 to 3.4 -- Bump `avro` from 1.11.1 to 1.11.2 -- Bump `woodstox-core` from 6.3.0 to 6.3.1 -- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) -- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) -- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) -- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) -- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) -- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) -- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 -- Bump `gson` from 2.10 to 2.10.1 -- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 -- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 -- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 -- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) -- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) -- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) -- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) -- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 
([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) -- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) -- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) -- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) -- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) ### Changed -- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) -- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) -- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) - Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) ### Deprecated @@ -92,12 +47,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix 
compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) -- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) -- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) -- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) -- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) -- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) ### Security diff --git a/CHANGELOG.md b/CHANGELOG.md index acd9c32d15fa5..5efcfee3d9d9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,12 +22,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add changes for overriding remote store and replication settings during snapshot restore. ([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868)) - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959)) - [Streaming Indexing] Ensure support of the new transport by security plugin ([#13174](https://github.com/opensearch-project/OpenSearch/pull/13174)) +- Add cluster setting to dynamically configure the buckets for filter rewrite optimization. 
([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893), [#13117](https://github.com/opensearch-project/OpenSearch/pull/13117)) -- Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) +- Bump `netty` from 4.1.107.Final to 4.1.109.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924), [#13233](https://github.com/opensearch-project/OpenSearch/pull/13233)) - Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996), [#12998](https://github.com/opensearch-project/OpenSearch/pull/12998), [#12999](https://github.com/opensearch-project/OpenSearch/pull/12999)) - Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.26.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump `org.apache.commons:commonscodec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) @@ -35,6 +36,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191)) - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192)) +- Bump `joda` from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193)) +- Bump `bouncycastle` from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) +- Update Google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) @@ -52,7 +56,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) - Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) - Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) +- Enable mockTelemetryPlugin for ITs and fix OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) +- Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) ### Security diff --git a/TESTING.md b/TESTING.md index
1c91d60840d61..80fc2412d736b 100644 --- a/TESTING.md +++ b/TESTING.md @@ -84,6 +84,7 @@ This will instruct all JVMs (including any that run cli tools such as creating t - In order to remotely attach a debugger to the process: `--debug-jvm` - In order to set a different keystore password: `--keystore-password yourpassword` - In order to set an OpenSearch setting, provide a setting with the following prefix: `-Dtests.opensearch.` +- In order to enable stack traces for MockSpanData during testing, add: `-Dtests.telemetry.span.stack_traces=true` (storing a stack trace alongside each span shows the exact code path that emitted it, which is useful when debugging instrumentation. Note: enabling this might lead to OOM issues while running ITs) ## Test case filtering @@ -412,8 +413,8 @@ Say you need to make a change to `main` and have a BWC layer in `5.x`. You will You may want to run BWC tests for a secure OpenSearch cluster. In order to do this, you will need to follow a few additional steps: 1. Clone the OpenSearch Security repository from https://github.com/opensearch-project/security. -2. Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/<version>.0/opensearch-security-<version>.0.zip` or by running `./gradlew assemble` from the base of the Security repository. -3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/<version>.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named the old version and one the new version. +2. Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/<version>.0/opensearch-security-<version>.0.zip` or by running `./gradlew assemble` from the base of the Security repository. +3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/<version>.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named the old version and one the new version. 4. Run the following command from the base of the Security repository: ``` @@ -428,7 +429,7 @@ You may want to run BWC tests for a secure OpenSearch cluster. In order to do th `-Dtests.security.manager=false` handles access issues when attempting to read the certificates from the file system. `-Dtests.opensearch.http.protocol=https` tells the wait for cluster startup task to do the right thing. -`-PcustomDistributionUrl=...` uses a custom build of the distribution of OpenSearch. This is unnecessary when running against standard/unmodified OpenSearch core distributions. +`-PcustomDistributionUrl=...` uses a custom build of the distribution of OpenSearch. This is unnecessary when running against standard/unmodified OpenSearch core distributions.
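For reference, the sketch below ties these flags together into a single invocation. It is illustrative only: the task name (`bwcTestSuite`), the `-p bwc-test` module path, and the distribution location are placeholder assumptions, not values taken from this diff.

```
# Sketch only: task name, versions, and paths are placeholders.
./gradlew -p bwc-test bwcTestSuite \
  -Dtests.security.manager=false \
  -Dtests.opensearch.http.protocol=https \
  -PcustomDistributionUrl="file:///path/to/opensearch-min-<version>-SNAPSHOT-linux-x64.tar.gz"
```

Drop `-PcustomDistributionUrl` when running against a standard/unmodified core distribution, as noted above.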
### Skip fetching latest diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 873cf7a721ac3..6c6138ac9b7f6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -22,12 +22,13 @@ antlr4 = 4.13.1 guava = 32.1.1-jre protobuf = 3.22.3 jakarta_annotation = 1.3.5 +google_http_client = 1.44.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.108.Final -joda = 2.12.2 +netty = 4.1.109.Final +joda = 2.12.7 # project reactor reactor_netty = 1.1.17 @@ -51,7 +52,7 @@ reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.77 +bouncycastle=1.78 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index d1e2ada6f8c84..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ 
+79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 85efcc43fd65a..92cdda59d1c99 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -17,22 +17,23 @@ opensearchplugin { classname 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' } -versions << [ - 'google': '1.23.0' -] - dependencies { - api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" - api "com.google.api-client:google-api-client:${versions.google}" + api "com.google.apis:google-api-services-compute:v1-rev235-1.25.0" + api "com.google.api-client:google-api-client:1.35.2" api "com.google.oauth-client:google-oauth-client:1.35.0" - api "com.google.http-client:google-http-client:${versions.google}" - api "com.google.http-client:google-http-client-jackson2:${versions.google}" + api "com.google.http-client:google-http-client:${versions.google_http_client}" + api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" + api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" api 'com.google.code.findbugs:jsr305:3.0.2' api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" + api 'io.grpc:grpc-api:1.57.2' + api 'io.opencensus:opencensus-api:0.31.1' + api 'io.opencensus:opencensus-contrib-http-util:0.31.1' + runtimeOnly "com.google.guava:guava:${versions.guava}" } restResources { @@ -43,6 +44,7 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /google-.*/, to: 'google' + mapping from: /opencensus.*/, to: 'opencensus' } check { @@ -55,26 +57,36 @@ test { systemProperty 'tests.artifact', project.name } -thirdPartyAudit.ignoreMissingClasses( - // classes are missing - 'javax.jms.Message', - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'com.google.api.client.json.gson.GsonFactory', - 'com.google.common.base.Preconditions', - 'com.google.common.base.Splitter', - 'com.google.common.cache.CacheBuilder', - 'com.google.common.cache.CacheLoader', - 'com.google.common.cache.LoadingCache', - 'com.google.common.collect.ImmutableMap', - 'com.google.common.collect.ImmutableMap$Builder', - 
'com.google.common.collect.ImmutableSet', - 'com.google.common.collect.Lists', - 'com.google.common.collect.Multiset', - 'com.google.common.collect.SortedMultiset', - 'com.google.common.collect.TreeMultiset', - 'com.google.common.io.BaseEncoding', -) +thirdPartyAudit { + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + ) + + ignoreMissingClasses( + 'com.google.api.client.http.apache.v2.ApacheHttpTransport', + 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', + 'com.google.common.util.concurrent.internal.InternalFutures', + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'com.google.gson.stream.JsonWriter', + 'javax.jms.Message', + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger' + ) +} diff --git a/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 deleted file mode 100644 index 0c35d8e08b91f..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522ea860eb48dee71dfe2c61a1fd09663539f556 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 new file mode 100644 index 0000000000000..47245f9429e7d --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 @@ -0,0 +1 @@ +2d737980e34c674da4ff0ae124b80caefdc7198a \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 deleted file mode 100644 index 17219dfe7ecc9..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -004169bfe1cf0e8b2013c9c479e43b731958bc64 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 new file mode 100644 index 0000000000000..f79af846281de --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 @@ -0,0 +1 @@ +67bf1ac84286b4f9ea996a90f6e91e36dc648aff \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 deleted file mode 100644 index 5526275d5a15f..0000000000000 --- 
a/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e86c84ff3c98eca6423e97780325b299133d858 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..501f268254fbc --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 @@ -0,0 +1 @@ +d8956bacb8a4011365fa15a690482c49a70c78c5 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..90ddf3ddc5ee6 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 @@ -0,0 +1 @@ +f3b8967c6f7078da6380687859d0873105f84d39 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 deleted file mode 100644 index 510856a517f04..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd6761f4046a8cb0455e6fa5f58e12b061e9826e \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..4472ffbbebe1c --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 @@ -0,0 +1 @@ +3f1947de0fd9eb250af16abe6103c11e68d11635 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 new file mode 100644 index 0000000000000..8b320fdd2f9cc --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 @@ -0,0 +1 @@ +c71a006b81ddae7bc4b7cb1d2da78c1b173761f4 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt b/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/grpc-api-NOTICE.txt b/plugins/discovery-gce/licenses/grpc-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 b/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 new file mode 100644 index 0000000000000..0d791b5d3f55b --- /dev/null +++ b/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 @@ -0,0 +1 @@ +ad575652d84153075dd41ec6177ccb15251262b2 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/guava-LICENSE.txt b/plugins/discovery-gce/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-gce/licenses/guava-NOTICE.txt b/plugins/discovery-gce/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/opencensus-LICENSE.txt b/plugins/discovery-gce/licenses/opencensus-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/opencensus-NOTICE.txt b/plugins/discovery-gce/licenses/opencensus-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..03760848f76ef --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 @@ -0,0 +1 @@ +66a60c7201c2b8b20ce495f0295b32bb0ccbbc57 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..4e123da3ab45f --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 @@ -0,0 +1 @@ +3c13fc5715231fadb16a9b74a44d9d59c460cfa8 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 deleted file mode 100644 index 3e780df9559a9..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..47fb5fd5e5f5d --- /dev/null +++ b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 deleted file mode 100644 index f71659316b8cd..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2bb8aa55dc901ee8b8aae7d1007c03592d65e03 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..eb7e650306f73 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +d26f5514b8c54f2878f8d49e0bc8e2acaab3c8bd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 deleted file mode 100644 index 05a8b2d5729bd..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed953791ba0229747dd0fd9911e3d76a462acfd3 \ No newline at end of file 
diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..385a9d930eede --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +dd61bcdb87678451dd42d42e267979bd4b4451a1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 deleted file mode 100644 index 3e780df9559a9..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..47fb5fd5e5f5d --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 93207338f7db8..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b13a709f1c449 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 deleted file mode 100644 index e850aad5f3656..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ad0af28e408092f0d12994802a9f3fe18d45f8c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5caf947d87a1b --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +7f4f0c0dd54c578af2c613a0db7172bf7dca9c79 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 deleted file mode 100644 index d4ae1b7e71661..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62b6a5dfee2e22ab9015a469cb68e4727596fd4c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..e0f52ab04ea84 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +a77224107f586a7f9e3dc5d12fc0d4d8f0c04803 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 8d299e265646d..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b42cdc2835eb0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1dfc64e19601c..c4b1ab8d6875e 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -75,10 +75,10 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.3' - api 'com.google.http-client:google-http-client-appengine:1.43.3' - api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.44.1' + api "com.google.http-client:google-http-client:${versions.google_http_client}" + api "com.google.http-client:google-http-client-appengine:${versions.google_http_client}" + api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" + api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" api 'com.google.oauth-client:google-oauth-client:1.34.1' diff --git 
a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 deleted file mode 100644 index 800467de8bdf3..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..501f268254fbc --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 @@ -0,0 +1 @@ +d8956bacb8a4011365fa15a690482c49a70c78c5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 deleted file mode 100644 index 4adcca6a55902..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..7b27b165453cd --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 @@ -0,0 +1 @@ +da4f9f691edb7a9f00cd806157a4990cb7e07711 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 deleted file mode 100644 index 43f4fe4a127e1..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -252e267acf720ef6333488740a696a1d5e204639 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..90ddf3ddc5ee6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 @@ -0,0 +1 @@ +f3b8967c6f7078da6380687859d0873105f84d39 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 deleted file mode 100644 index 5f0eed9c5d7e4..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad97680373f9c9f278f597ad6552d44e20418929 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..a874755cc29da --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +3ba1acc8ff088334f2ac5556663f8b737eb8b571 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index d1e2ada6f8c84..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 deleted file mode 100644 index 6ed00ff79dea9..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -baf7b939ef71b25713cacbe47bef8caf80ce99c6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..83fc39246ef0a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +7307c8acbc9b331fce3496750a5112bdc726fd2a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index fc70fbb0db00e..beb710c84c4b8 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -214,6 +214,11 @@ public Map> extendedStats() { return extendedStats; } + @Override + public boolean isObjectMetadataUploadSupported() { + return true; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 
--- a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index d1e2ada6f8c84..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 93207338f7db8..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b13a709f1c449 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 8d299e265646d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b42cdc2835eb0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index 
d1e2ada6f8c84..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/server/licenses/joda-time-2.12.2.jar.sha1 b/server/licenses/joda-time-2.12.2.jar.sha1 deleted file mode 100644 index 6e9b28eb35597..0000000000000 --- a/server/licenses/joda-time-2.12.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78e18a7b4180e911dafba0a412adfa82c1e3d14b \ No newline at end of file diff --git a/server/licenses/joda-time-2.12.7.jar.sha1 b/server/licenses/joda-time-2.12.7.jar.sha1 new file mode 100644 index 0000000000000..7ce5c501873c0 --- /dev/null +++ b/server/licenses/joda-time-2.12.7.jar.sha1 @@ -0,0 +1 @@ +d015b997eccd511e5567218a51651ff0625f6f25 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java index 34b60d5f3e9b3..e316bae5d8ebc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java @@ -41,6 +41,7 @@ public class RemoteDualReplicationIT extends MigrationBaseTestCase { private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; private final String REMOTE_PRI_DOCREP_REMOTE_REP = "remote-primary-docrep-remote-replica"; private final String FAILOVER_REMOTE_TO_DOCREP = "failover-remote-to-docrep"; + private final String FAILOVER_REMOTE_TO_REMOTE = "failover-remote-to-remote"; @Override protected Collection> nodePlugins() { @@ -241,14 +242,63 @@ RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush requ */ extraSettings = Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), "3s").build(); testRemotePrimaryDocRepAndRemoteReplica(); - DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); - assertBusy(() -> { - for (ShardStats shardStats : 
internalCluster().client() + pollAndCheckRetentionLeases(REMOTE_PRI_DOCREP_REMOTE_REP); + } + + public void testMissingRetentionLeaseCreatedOnFailedOverRemoteReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting docrep data node"); + internalCluster().startDataOnlyNode(); + + Settings zeroReplicasAndOverridenSyncIntervals = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .build(); + createIndex(FAILOVER_REMOTE_TO_REMOTE, zeroReplicasAndOverridenSyncIntervals); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + + indexBulk(FAILOVER_REMOTE_TO_REMOTE, 100); + + logger.info("---> Starting first remote node"); + initDocRepToRemoteMigration(); + addRemote = true; + String firstRemoteNode = internalCluster().startDataOnlyNode(); + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_REMOTE); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, firstRemoteNode); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_REMOTE, 0, primaryShardHostingNode, firstRemoteNode)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_REMOTE, 100, 0); + + String secondRemoteNode = internalCluster().startDataOnlyNode(); + Settings twoReplicas = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build(); + assertAcked( + internalCluster().client() .admin() .indices() - .prepareStats(REMOTE_PRI_DOCREP_REMOTE_REP) + .prepareUpdateSettings() + .setIndices(FAILOVER_REMOTE_TO_REMOTE) + .setSettings(twoReplicas) .get() - .getShards()) { + ); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + + logger.info("---> Checking retention leases"); + pollAndCheckRetentionLeases(FAILOVER_REMOTE_TO_REMOTE); + } + + private void pollAndCheckRetentionLeases(String indexName) throws Exception { + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertBusy(() -> { + for (ShardStats shardStats : internalCluster().client().admin().indices().prepareStats(indexName).get().getShards()) { ShardRouting shardRouting = shardStats.getShardRouting(); DiscoveryNode discoveryNode = nodes.get(shardRouting.currentNodeId()); RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java index 5ae2a976f4066..c3720e6fbbd09 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -12,11 +12,13 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.SnapshotInfo; import 
org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -28,6 +30,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; import static org.opensearch.node.remotestore.RemoteStoreNodeService.Direction.REMOTE_STORE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -140,6 +143,37 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix assertRemoteStoreBackedIndex(restoredIndexName2); } + + // compatibility mode setting test + + public void testSwitchToStrictMode() throws Exception { + logger.info(" --> initialize cluster"); + initializeCluster(false); + + logger.info(" --> create a mixed mode cluster"); + setClusterMode(MIXED.mode); + addRemote = true; + String remoteNodeName = internalCluster().startNode(); + addRemote = false; + String nonRemoteNodeName = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + assertNodeInCluster(remoteNodeName); + assertNodeInCluster(nonRemoteNodeName); + + logger.info(" --> attempt switching to strict mode"); + SettingsException exception = assertThrows(SettingsException.class, () -> setClusterMode(STRICT.mode)); + assertEquals( + "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes", + exception.getMessage() + ); + + logger.info(" --> stop remote node so that the cluster has only non-remote nodes"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName)); + ensureStableCluster(2); + + logger.info(" --> attempt switching to strict mode"); + setClusterMode(STRICT.mode); + } + // restore indices from a snapshot private void restoreSnapshot(String snapshotRepoName, String snapshotName, String restoredIndexName) { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index d34a5f4edbaec..f8e5079b01a36 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), 
PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); @@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -272,13 +272,13 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); - // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. + // Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix. indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated validatePathType(indexName1, PathType.FIXED); @@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); // Close index 2 @@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { ensureGreen(indexName2); // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); } private void validatePathType(String index, PathType pathType) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index b767ffff05e3a..78441f74f6b4f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -37,6 +37,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -59,6 +60,7 @@ import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; @@ -77,7 +79,7 @@ public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, MockFsRepositoryPlugin.class); } @Override @@ -789,4 +791,72 @@ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionExcep docs + moreDocs + uncommittedOps ); } + + // Test local only translog files which are not uploaded to remote store (no metadata present in remote) + // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. + public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { + clusterSettingsSuppliedByTest = true; + + // Overriding settings to use AsyncMultiStreamBlobContainer + Settings settings = Settings.builder() + .put(super.nodeSettings(1)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + MockFsRepositoryPlugin.TYPE, + REPOSITORY_2_NAME, + translogRepoPath, + MockFsRepositoryPlugin.TYPE + ) + ) + .build(); + + internalCluster().startClusterManagerOnlyNode(settings); + String dataNode = internalCluster().startDataOnlyNode(settings); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + int searchableDocs = 0; + for (int i = 0; i < randomIntBetween(1, 5); i++) { + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + searchableDocs += 15; + } + indexBulk(INDEX_NAME, 15); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + + // 3. 
Delete metadata from remote translog + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", TRANSLOG, METADATA).buildAsString(); + Path translogMetaDataPath = Path.of(translogRepoPath + "/" + shardPath); + + try (Stream files = Files.list(translogMetaDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + + internalCluster().restartNode(dataNode); + + ensureGreen(INDEX_NAME); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs + 15); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java index e051265d4b3bc..3f32cf3bfbbe7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -34,6 +35,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -93,7 +95,7 @@ public void testMinDocCountOnDateHistogram() throws Exception { final SearchResponse allResponse = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0)) .get(); final Histogram allHisto = allResponse.getAggregations().get("histo"); @@ -104,4 +106,36 @@ public void testMinDocCountOnDateHistogram() throws Exception { assertEquals(entry.getValue(), results.get(entry.getKey())); } } + + public void testDisableOptimizationGivesSameResults() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setSize(0) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0)) + .get(); + + final Histogram allHisto1 = response.getAggregations().get("histo"); + + final ClusterUpdateSettingsResponse updateSettingResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(MAX_AGGREGATION_REWRITE_FILTERS.getKey(), 0)) + .get(); + + assertEquals(updateSettingResponse.getTransientSettings().get(MAX_AGGREGATION_REWRITE_FILTERS.getKey()), "0"); + + response = client().prepareSearch("idx") + .setSize(0) + 
.addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0)) + .get(); + + final Histogram allHisto2 = response.getAggregations().get("histo"); + + assertEquals(allHisto1, allHisto2); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(MAX_AGGREGATION_REWRITE_FILTERS.getKey())) + .get(); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 7a52c8aa5018e..54db951eb41c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -572,7 +572,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertEquals(snapshotStatuses.size(), 1); logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); - assertTrue(snapshotStatuses.get(0).getState().completed()); + assertThat(getSnapshot("test-repo", "test-snap-2").state(), equalTo(SnapshotState.PARTIAL)); }, 1, TimeUnit.MINUTES); SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap-2") @@ -589,7 +589,6 @@ public void testRestoreIndexWithMissingShards() throws Exception { // After it was marked as completed in the cluster state - we need to check if it's completed on the file system as well assertBusy(() -> { SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap-2"); - assertTrue(snapshotInfo.state().completed()); assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); }, 1, TimeUnit.MINUTES); } else { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index c574233d25051..fb69209f7adda 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; @@ -101,13 +100,9 @@ public void testStatusApiConsistency() { assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); - final List snapshotStatus = clusterAdmin().snapshotsStatus( - new SnapshotsStatusRequest("test-repo", new String[] { "test-snap" }) - ).actionGet().getSnapshots(); - assertThat(snapshotStatus.size(), equalTo(1)); - final SnapshotStatus snStatus = snapshotStatus.get(0); - assertEquals(snStatus.getStats().getStartTime(), snapshotInfo.startTime()); - assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); + final SnapshotStatus snapshotStatus = getSnapshotStatus("test-repo", "test-snap"); + 
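// getSnapshotStatus(repoName, snapshotName) is assumed to be a test-base helper (not shown in this diff) that wraps the single-snapshot status API, conceptually: clusterAdmin().prepareSnapshotStatus(repo).setSnapshots(snap).get().getSnapshots().get(0); +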
assertEquals(snapshotStatus.getStats().getStartTime(), snapshotInfo.startTime()); + assertEquals(snapshotStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } public void testStatusAPICallForShallowCopySnapshot() { @@ -357,6 +352,22 @@ public void testSnapshotStatusOnFailedSnapshot() throws Exception { assertEquals(SnapshotsInProgress.State.FAILED, snapshotsStatusResponse.getSnapshots().get(0).getState()); } + public void testSnapshotStatusOnPartialSnapshot() throws Exception { + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final String indexName = "test-idx"; + createRepository(repoName, "fs"); + // create an index with a single shard on the data node, that will be stopped + createIndex(indexName, singleShardOneNode(dataNode)); + index(indexName, "_doc", "some_doc_id", "foo", "bar"); + logger.info("--> stopping data node before creating snapshot"); + stopNode(dataNode); + startFullSnapshot(repoName, snapshotName, true).get(); + final SnapshotStatus snapshotStatus = getSnapshotStatus(repoName, snapshotName); + assertEquals(SnapshotsInProgress.State.PARTIAL, snapshotStatus.getState()); + } + public void testStatusAPICallInProgressShallowSnapshot() throws Exception { internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 2f3cc77b05550..e6c149216da09 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -45,6 +45,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterManagerTaskKeys; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; @@ -53,12 +54,18 @@ import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; /** * Transport action for updating cluster settings @@ -251,6 +258,7 @@ public void onFailure(String source, Exception e) { @Override public ClusterState execute(final ClusterState currentState) { + validateCompatibilityModeSettingRequest(request, state); final ClusterState clusterState = updater.updateSettings( currentState, clusterSettings.upgradeSettings(request.transientSettings()), @@ -264,4 +272,49 @@ public ClusterState execute(final ClusterState currentState) { ); } + /** + * Runs various checks associated with changing cluster compatibility mode + * 
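(for example, a request that sets {@code REMOTE_STORE_COMPATIBILITY_MODE_SETTING} to {@code strict} is rejected + * with a {@code SettingsException} while the cluster still contains a mix of remote and non-remote nodes) + *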
@param request cluster settings update request, for settings to be updated and new values + * @param clusterState current state of cluster, for information on nodes + */ + public void validateCompatibilityModeSettingRequest(ClusterUpdateSettingsRequest request, ClusterState clusterState) { + Settings settings = Settings.builder().put(request.persistentSettings()).put(request.transientSettings()).build(); + if (RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.exists(settings)) { + String value = settings.get(RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey()).toLowerCase(Locale.ROOT); + validateAllNodesOfSameVersion(clusterState.nodes()); + if (value.equals(RemoteStoreNodeService.CompatibilityMode.STRICT.mode)) { + validateAllNodesOfSameType(clusterState.nodes()); + } + } + } + + /** + * Verifies that all nodes in the cluster are on the same version while the compatibility mode is being changed. + * If not, it throws a SettingsException. + * @param discoveryNodes current discovery nodes in the cluster + */ + private void validateAllNodesOfSameVersion(DiscoveryNodes discoveryNodes) { + if (discoveryNodes.getMaxNodeVersion().equals(discoveryNodes.getMinNodeVersion()) == false) { + throw new SettingsException("can not change the compatibility mode when all the nodes in cluster are not of the same version"); + } + } + + /** + * Verifies that all nodes in the cluster are of the same type (all remote or all non-remote) while switching to + * STRICT compatibility mode. If not, it throws a SettingsException. + * @param discoveryNodes current discovery nodes in the cluster + */ + private void validateAllNodesOfSameType(DiscoveryNodes discoveryNodes) { + Set<Boolean> nodeTypes = discoveryNodes.getNodes() + .values() + .stream() + .map(DiscoveryNode::isRemoteStoreNode) + .collect(Collectors.toSet()); + if (nodeTypes.size() != 1) { + throw new SettingsException( + "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes" + ); + } + } + } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 7f6c039cf2ecc..4fc2acb2caa51 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -356,11 +356,11 @@ private void loadRepositoryData( state = SnapshotsInProgress.State.FAILED; break; case SUCCESS: - case PARTIAL: - // Translating both PARTIAL and SUCCESS to SUCCESS for now - // TODO: add the differentiation on the metadata level in the next major release state = SnapshotsInProgress.State.SUCCESS; break; + case PARTIAL: + state = SnapshotsInProgress.State.PARTIAL; + break; default: throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state()); } diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 3de23d2490c63..d658f38430dd9 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -747,7 +752,12 @@ public void writeTo(StreamOutput out) throws IOException { snapshot.writeTo(out); out.writeBoolean(includeGlobalState); out.writeBoolean(partial); -
out.writeByte(state.value()); + if ((out.getVersion().before(Version.V_2_14_0)) && state == State.PARTIAL) { + // Setting to SUCCESS for partial snapshots in older versions to maintain backward compatibility + out.writeByte(State.SUCCESS.value()); + } else { + out.writeByte(state.value()); + } out.writeList(indices); out.writeLong(startTime); out.writeMap(shards, (o, v) -> v.writeTo(o), (o, v) -> v.writeTo(o)); @@ -937,7 +942,8 @@ public enum State { STARTED((byte) 1, false), SUCCESS((byte) 2, true), FAILED((byte) 3, true), - ABORTED((byte) 4, false); + ABORTED((byte) 4, false), + PARTIAL((byte) 5, false); private final byte value; @@ -968,6 +974,8 @@ public static State fromValue(byte value) { return FAILED; case 4: return ABORTED; + case 5: + return PARTIAL; default: throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d55ec3362b01f..0eba4d241f0fd 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -98,6 +98,7 @@ import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; @@ -191,7 +192,8 @@ public MetadataCreateIndexService( final NamedXContentRegistry xContentRegistry, final SystemIndices systemIndices, final boolean forbidPrivateIndexSettings, - final AwarenessReplicaBalance awarenessReplicaBalance + final AwarenessReplicaBalance awarenessReplicaBalance, + final RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.clusterService = clusterService; @@ -211,7 +213,7 @@ public MetadataCreateIndexService( createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); Supplier minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion(); remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings(), minNodeVersionSupplier) + ? new RemoteStorePathStrategyResolver(remoteStoreSettings, minNodeVersionSupplier) : null; } diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 4f5f8d4b1ef5f..46e82a1f5ef0a 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -83,14 +83,15 @@ public interface BlobContainer { * Creates a new {@link FetchBlobResult} for the given blob name. * * @param blobName - * The name of the blob to get an {@link InputStream} for. + * The name of the blob to get an {@link FetchBlobResult} for. * @return The {@link FetchBlobResult} of the blob. * @throws NoSuchFileException if the blob does not exist * @throws IOException if the blob can not be read. 
*/ @ExperimentalApi default FetchBlobResult readBlobWithMetadata(String blobName) throws IOException { - throw new UnsupportedOperationException("readBlobWithMetadata is not implemented yet"); + InputStream inputStream = readBlob(blobName); + return new FetchBlobResult(inputStream, null); }; /** diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index 0f6646d37f950..95341d1df40f7 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -71,6 +71,13 @@ default Map<Metric, Map<String, Long>> extendedStats() { */ default void reload(RepositoryMetadata repositoryMetadata) {} + /** + * Returns a boolean indicating whether the blob store supports object metadata upload + */ + default boolean isObjectMetadataUploadSupported() { + return false; + } + /** * Metrics for BlobStore interactions */ diff --git a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java b/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java index 55aca771b586c..4e8ea4c6b089f 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java +++ b/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java @@ -10,6 +10,8 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.io.Closeable; +import java.io.IOException; import java.io.InputStream; import java.util.Map; @@ -20,7 +22,7 @@ * @opensearch.experimental */ @ExperimentalApi -public class FetchBlobResult { +public class FetchBlobResult implements Closeable { /** * Downloaded blob InputStream @@ -45,4 +47,10 @@ public FetchBlobResult(InputStream inputStream, Map<String, String> metadata) { this.metadata = metadata; } + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + } + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ef74a794a9975..dab0f6bcf1c85 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -525,6 +525,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.MAX_OPEN_SCROLL_CONTEXT, SearchService.MAX_OPEN_PIT_CONTEXT, SearchService.MAX_PIT_KEEPALIVE_SETTING, + SearchService.MAX_AGGREGATION_REWRITE_FILTERS, CreatePitController.PIT_INIT_KEEP_ALIVE, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, @@ -712,7 +713,6 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, @@ -730,7 +730,9 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, - RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, +
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index b51abf19fc000..c1ac74724e405 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -9,6 +9,7 @@ package org.opensearch.index.remote; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.hash.FNV1a; @@ -23,6 +24,8 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; /** * This class contains the different enums related to remote store like data categories and types, path types @@ -30,12 +33,14 @@ * * @opensearch.api */ +@ExperimentalApi public class RemoteStoreEnums { /** * Categories of the data in Remote store. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum DataCategory { SEGMENTS("segments", Set.of(DataType.values())), TRANSLOG("translog", Set.of(DATA, METADATA)); @@ -61,6 +66,7 @@ public String getName() { * Types of data in remote store. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum DataType { DATA("data"), METADATA("metadata"), @@ -82,6 +88,7 @@ public String getName() { * For more information, see Github issue #12567. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum PathType { FIXED(0) { @Override @@ -214,15 +221,29 @@ public static PathType parseString(String pathType) { * Type of hashes supported for path types that have hashing. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum PathHashAlgorithm { - FNV_1A(0) { + FNV_1A_BASE64(0) { @Override String hash(PathInput pathInput) { String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); long hash = FNV1a.hash64(input); - return RemoteStoreUtils.longToUrlBase64(hash); + return longToUrlBase64(hash); + } + }, + /** + * This hash algorithm generates a hash value whose first 6 bits create a base64 character and whose next 14 + * bits are kept as a binary string.
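+ * For example (illustrative): if the top 20 bits of the 64-bit hash are 000001 followed by 01010101010101, the + * first 6 bits select index 1 of the URL-safe base64 charset, i.e. 'B', and the next 14 bits are appended verbatim, + * so the encoded path component is B01010101010101 (one base64 character plus 14 binary digits).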
+ */ + FNV_1A_COMPOSITE_1(1) { + @Override + String hash(PathInput pathInput) { + String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() + .getName(); + long hash = FNV1a.hash64(input); + return longToCompositeBase64AndBinaryEncoding(hash, 20); } }; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 5b067115df781..a33f7522daaae 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -9,10 +9,9 @@ package org.opensearch.index.remote; import org.opensearch.Version; -import org.opensearch.common.settings.ClusterSettings; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import java.util.function.Supplier; @@ -23,27 +22,21 @@ */ public class RemoteStorePathStrategyResolver { - private volatile PathType type; - + private final RemoteStoreSettings remoteStoreSettings; private final Supplier minNodeVersionSupplier; - public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier minNodeVersionSupplier) { + public RemoteStorePathStrategyResolver(RemoteStoreSettings remoteStoreSettings, Supplier minNodeVersionSupplier) { + this.remoteStoreSettings = remoteStoreSettings; this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } public RemoteStorePathStrategy get() { PathType pathType; PathHashAlgorithm pathHashAlgorithm; // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. - pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; + pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = pathType == PathType.FIXED ? 
null : remoteStoreSettings.getPathHashAlgorithm(); return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } - - private void setType(PathType type) { - this.type = type; - } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java index 33cd40f802d43..52f83096dc08d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java @@ -78,7 +78,9 @@ public void validateSegmentsUploadLag(ShardId shardId) { for (LagValidator lagValidator : lagValidators) { if (lagValidator.validate(remoteSegmentTransferTracker, shardId) == false) { remoteSegmentTransferTracker.incrementRejectionCount(lagValidator.name()); - throw new OpenSearchRejectedExecutionException(lagValidator.rejectionMessage(remoteSegmentTransferTracker, shardId)); + String rejectionMessage = lagValidator.rejectionMessage(remoteSegmentTransferTracker, shardId); + logger.warn("Rejecting write requests for shard due to remote backpressure: {}", rejectionMessage); + throw new OpenSearchRejectedExecutionException(rejectionMessage); } } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 7d0743e70b6cb..4d1d98334c3c4 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -15,6 +15,7 @@ import java.util.Base64; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -26,10 +27,16 @@ public class RemoteStoreUtils { public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length(); + /** + * URL safe base 64 character set. This must not be changed as this is used in deriving the base64 equivalent of binary. + */ + static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray(); + /** * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result. * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding * of 0s to the string. + * * @param num number to get the inverted long string for * @return String value of Long.MAX_VALUE - num */ @@ -46,6 +53,7 @@ public static String invertLong(long num) { /** * This method converts the given string into long and subtracts it from Long.MAX_VALUE + * * @param str long in string format to be inverted * @return long value of the invert result */ @@ -59,6 +67,7 @@ public static long invertLong(String str) { /** * Extracts the segment name from the provided segment file name + * * @param filename Segment file name to parse * @return Name of the segment that the segment file belongs to */ @@ -79,10 +88,9 @@ public static String getSegmentName(String filename) { } /** - * * @param mdFiles List of segment/translog metadata files - * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . - * fn returns null if node id is not part of the file name + * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . 
+ * fn returns null if node id is not part of the file name */ public static void verifyNoMultipleWriters(List mdFiles, Function> fn) { Map nodesByPrimaryTermAndGen = new HashMap<>(); @@ -116,4 +124,26 @@ static String longToUrlBase64(long value) { String base64Str = Base64.getUrlEncoder().encodeToString(hashBytes); return base64Str.substring(0, base64Str.length() - 1); } + + static long urlBase64ToLong(String base64Str) { + byte[] hashBytes = Base64.getUrlDecoder().decode(base64Str); + return ByteBuffer.wrap(hashBytes).getLong(); + } + + /** + * Converts an input hash which occupies 64 bits of memory into a composite encoded string. The string will have 2 parts - + * 1. Base 64 string and 2. Binary String. We will use the first 6 bits for creating the base 64 string. + * For the second part, the rest of the bits (of length {@code len}-6) will be used as is in string form. + */ + static String longToCompositeBase64AndBinaryEncoding(long value, int len) { + if (len < 7 || len > 64) { + throw new IllegalArgumentException("In longToCompositeBase64AndBinaryEncoding, len must be between 7 and 64 (both inclusive)"); + } + String binaryEncoding = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0'); + String base64Part = binaryEncoding.substring(0, 6); + String binaryPart = binaryEncoding.substring(6, len); + int base64DecimalValue = Integer.valueOf(base64Part, 2); + assert base64DecimalValue >= 0 && base64DecimalValue < 64; + return URL_BASE64_CHARSET[base64DecimalValue] + binaryPart; + } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 599c54f293e89..39b1cc130d6a5 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -255,6 +255,12 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final Function isShardOnRemoteEnabledNode; + /** + * Flag to indicate whether {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} + * has been run successfully + */ + private boolean createdMissingRetentionLeases; + /** * Get all retention leases tracked on this shard. 
* @@ -955,7 +961,13 @@ private boolean invariant() { assert checkpoints.get(aId) != null : "aId [" + aId + "] is pending in sync but isn't tracked"; } - if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { + if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases + // Skip assertion if createMissingPeerRecoveryRetentionLeases has not yet run after activating primary context + // This is required since during an ongoing remote store migration, + // remote enabled primary taking over primary context from another remote enabled shard + // might not have retention leases for docrep shard copies + // (since all RetentionLease sync actions are blocked on remote shard copies) + && createdMissingRetentionLeases) { // all tracked shard copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { final CheckpointState cps = checkpoints.get(shardRouting.allocationId().getId()); @@ -1843,19 +1855,34 @@ private synchronized void setHasAllPeerRecoveryRetentionLeases() { assert invariant(); } + private synchronized void setCreatedMissingRetentionLeases() { + createdMissingRetentionLeases = true; + assert invariant(); + } + public synchronized boolean hasAllPeerRecoveryRetentionLeases() { return hasAllPeerRecoveryRetentionLeases; } /** - * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * Create any required peer-recovery retention leases that do not currently exist. This can happen if either: + * - We just did a rolling upgrade from a version prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. 
+ * - In a mixed mode cluster (during remote store migration), a remote enabled primary shard copy fails over to another remote enabled shard copy, + * but the replication group still has other shards in docrep nodes */ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener<Void> listener) { - if (hasAllPeerRecoveryRetentionLeases == false) { + // Create missing RetentionLeases if the primary is on a remote enabled node + // and the replication group has at least one shard copy on a docrep enabled node + // No-op if the retention leases for the tracked shard copies already exist + boolean createMissingRetentionLeasesDuringMigration = indexSettings.isAssignedOnRemoteNode() + && replicationGroup.getReplicationTargets() + .stream() + .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId()) == false); + if (hasAllPeerRecoveryRetentionLeases == false || createMissingRetentionLeasesDuringMigration) { final List<ShardRouting> shardRoutings = routingTable.assignedShards(); final GroupedActionListener<ReplicationResponse> groupedActionListener = new GroupedActionListener<>(ActionListener.wrap(vs -> { setHasAllPeerRecoveryRetentionLeases(); + setCreatedMissingRetentionLeases(); listener.onResponse(null); }, listener::onFailure), shardRoutings.size()); for (ShardRouting shardRouting : shardRoutings) { diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 67799f0465c29..da905b9605dfd 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -21,6 +21,7 @@ import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; import org.opensearch.index.translog.transfer.TransferSnapshot; @@ -219,7 +220,7 @@ static void download(TranslogTransferManager translogTransferManager, Path locat throw ex; } - static private void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + private static void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { logger.debug("Downloading translog files from remote"); RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); long prevDownloadBytesSucceeded = statsTracker.getDownloadBytesSucceeded(); @@ -254,10 +255,32 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager location.resolve(Translog.getCommitCheckpointFileName(translogMetadata.getGeneration())), location.resolve(Translog.CHECKPOINT_FILE_NAME) ); + } else { + // When code flow reaches this block, it means we don't have any translog files uploaded to remote store. + // If local filesystem contains empty translog or no translog, we don't do anything. + // If local filesystem contains non-empty translog, we clean up these files and create an empty translog.
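+ // Here "empty" is judged purely from the local checkpoint (see isEmptyTranslog below): the generation equals + // minTranslogGeneration, numOps is 0, and the min/max sequence numbers are still at NO_OPS_PERFORMED.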
+ logger.debug("No translog files found on remote, checking local filesystem for cleanup"); + if (FileSystemUtils.exists(location.resolve(CHECKPOINT_FILE_NAME))) { + final Checkpoint checkpoint = readCheckpoint(location); + if (isEmptyTranslog(checkpoint) == false) { + logger.debug("Translog files exist on local without any metadata in remote, cleaning up these files"); + // Creating empty translog will cleanup the older un-referenced tranlog files, we don't have to explicitly delete + Translog.createEmptyTranslog(location, translogTransferManager.getShardId(), checkpoint); + } else { + logger.debug("Empty translog on local, skipping clean-up"); + } + } } logger.debug("downloadOnce execution completed"); } + private static boolean isEmptyTranslog(Checkpoint checkpoint) { + return checkpoint.generation == checkpoint.minTranslogGeneration + && checkpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && checkpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && checkpoint.numOps == 0; + } + public static TranslogTransferManager buildTranslogTransferManager( BlobStoreRepository blobStoreRepository, ThreadPool threadPool, diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 7c50ed6ecd58f..c653605f8fa10 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -2011,17 +2011,47 @@ public static String createEmptyTranslog( final long primaryTerm, @Nullable final String translogUUID, @Nullable final ChannelFactory factory + ) throws IOException { + return createEmptyTranslog(location, shardId, initialGlobalCheckpoint, primaryTerm, translogUUID, factory, 1); + } + + public static String createEmptyTranslog(final Path location, final ShardId shardId, Checkpoint checkpoint) throws IOException { + final Path highestGenTranslogFile = location.resolve(getFilename(checkpoint.generation)); + final TranslogHeader translogHeader; + try (FileChannel channel = FileChannel.open(highestGenTranslogFile, StandardOpenOption.READ)) { + translogHeader = TranslogHeader.read(highestGenTranslogFile, channel); + } + final String translogUUID = translogHeader.getTranslogUUID(); + final long primaryTerm = translogHeader.getPrimaryTerm(); + final ChannelFactory channelFactory = FileChannel::open; + return Translog.createEmptyTranslog( + location, + shardId, + SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm, + translogUUID, + channelFactory, + checkpoint.generation + 1 + ); + } + + public static String createEmptyTranslog( + final Path location, + final ShardId shardId, + final long initialGlobalCheckpoint, + final long primaryTerm, + @Nullable final String translogUUID, + @Nullable final ChannelFactory factory, + final long generation ) throws IOException { IOUtils.rm(location); Files.createDirectories(location); - final long generation = 1L; - final long minTranslogGeneration = 1L; final ChannelFactory channelFactory = factory != null ? factory : FileChannel::open; final String uuid = Strings.hasLength(translogUUID) ? 
translogUUID : UUIDs.randomBase64UUID(); final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); final Path translogFile = location.resolve(getFilename(generation)); - final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, minTranslogGeneration); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, generation); Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final TranslogWriter writer = TranslogWriter.create( @@ -2031,7 +2061,7 @@ public static String createEmptyTranslog( translogFile, channelFactory, EMPTY_TRANSLOG_BUFFER_SIZE, - minTranslogGeneration, + generation, initialGlobalCheckpoint, () -> { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index bec2d78d9af62..4de25f30ea0d6 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionRunnable; +import org.opensearch.action.LatchedActionListener; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; @@ -32,10 +33,15 @@ import java.io.InputStream; import java.nio.channels.FileChannel; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; @@ -48,12 +54,40 @@ public class BlobStoreTransferService implements TransferService { private final BlobStore blobStore; private final ThreadPool threadPool; + private ConcurrentHashMap fileTransferTrackerCache; private static final Logger logger = LogManager.getLogger(BlobStoreTransferService.class); public BlobStoreTransferService(BlobStore blobStore, ThreadPool threadPool) { this.blobStore = blobStore; this.threadPool = threadPool; + fileTransferTrackerCache = new ConcurrentHashMap<>(); + } + + @Override + public ConcurrentHashMap getFileTransferTrackerCache() { + return fileTransferTrackerCache; + } + + void add(String file, boolean success) { + FileTransferTracker.TransferState targetState = success + ? 
FileTransferTracker.TransferState.SUCCESS + : FileTransferTracker.TransferState.FAILED; + add(file, targetState); + } + + private void add(String file, FileTransferTracker.TransferState targetState) { + fileTransferTrackerCache.compute(file, (k, v) -> { + if (v == null || v.validateNextState(targetState)) { + return targetState; + } + throw new IllegalStateException("Unexpected transfer state " + v + " while setting target to " + targetState); + }); + } + + @Override + public boolean isObjectMetadataUploadSupported() { + return blobStore.isObjectMetadataUploadSupported(); } @Override @@ -93,12 +127,102 @@ public void uploadBlobs( ActionListener<TransferFileSnapshot> listener, WritePriority writePriority ) { + boolean isObjectMetadataUploadSupported = isObjectMetadataUploadSupported(); + fileSnapshots.forEach(fileSnapshot -> { BlobPath blobPath = blobPaths.get(fileSnapshot.getPrimaryTerm()); - if (!(blobStore.blobContainer(blobPath) instanceof AsyncMultiStreamBlobContainer)) { - uploadBlob(ThreadPool.Names.TRANSLOG_TRANSFER, fileSnapshot, blobPath, listener, writePriority); + + if (!isObjectMetadataUploadSupported) { + List<Exception> exceptionList = new ArrayList<>(2); + + Set<TransferFileSnapshot> filesToUpload = new HashSet<>(2); + try { + if (FileTransferTracker.TransferState.SUCCESS.equals(fileTransferTrackerCache.get(fileSnapshot.getName())) == false) { + filesToUpload.add( + new TransferFileSnapshot(fileSnapshot.getPath(), fileSnapshot.getPrimaryTerm(), fileSnapshot.getChecksum()) + ); + } + if (FileTransferTracker.TransferState.SUCCESS.equals( + fileTransferTrackerCache.get(fileSnapshot.getMetadataFileName()) + ) == false) { + filesToUpload.add( + new TransferFileSnapshot( + fileSnapshot.getMetadataFilePath(), + fileSnapshot.getPrimaryTerm(), + fileSnapshot.getMetadataFileChecksum() + ) + ); + } + } catch (IOException e) { + throw new FileTransferException(fileSnapshot, e); + } + + if (filesToUpload.isEmpty()) { + listener.onResponse(fileSnapshot); + return; + } + + final CountDownLatch latch = new CountDownLatch(filesToUpload.size()); + LatchedActionListener<TransferFileSnapshot> latchedActionListener = new LatchedActionListener<>( + ActionListener.wrap(fileSnapshot1 -> add(fileSnapshot1.getName(), true), ex -> { + assert ex instanceof FileTransferException; + logger.error( + () -> new ParameterizedMessage( + "Exception during transfer for file {}", + ((FileTransferException) ex).getFileSnapshot().getName() + ), + ex + ); + FileTransferException e = (FileTransferException) ex; + TransferFileSnapshot file = e.getFileSnapshot(); + add(file.getName(), false); + exceptionList.add(ex); + }), + latch + ); + + filesToUpload.forEach(separateFileSnapshot -> { + if (!(blobStore.blobContainer(blobPath) instanceof AsyncMultiStreamBlobContainer)) { + uploadBlob( + ThreadPool.Names.TRANSLOG_TRANSFER, + separateFileSnapshot, + blobPath, + latchedActionListener, + writePriority + ); + } else { + logger.info("uploading file = {}", separateFileSnapshot.getName()); + uploadBlob(separateFileSnapshot, latchedActionListener, blobPath, writePriority); + } + }); + + try { + if (latch.await(300, TimeUnit.SECONDS) == false) { + Exception ex = new TranslogUploadFailedException( + "Timed out waiting for transfer of snapshot " + fileSnapshot + " to complete" + ); + throw new FileTransferException(fileSnapshot, ex); + } + } catch (InterruptedException ex) { + Exception exception = new TranslogUploadFailedException("Failed to upload " + fileSnapshot, ex); + Thread.currentThread().interrupt(); + throw new FileTransferException(fileSnapshot, exception); + } + + if
(fileTransferTrackerCache.get(fileSnapshot.getName()).equals(FileTransferTracker.TransferState.SUCCESS) + && fileTransferTrackerCache.get(fileSnapshot.getMetadataFileName()).equals(FileTransferTracker.TransferState.SUCCESS)) { + listener.onResponse(fileSnapshot); + } else { + assert exceptionList.isEmpty() == false; + listener.onFailure(exceptionList.get(0)); + } + } else { - uploadBlob(fileSnapshot, listener, blobPath, writePriority); + if (!(blobStore.blobContainer(blobPath) instanceof AsyncMultiStreamBlobContainer)) { + uploadBlob(ThreadPool.Names.TRANSLOG_TRANSFER, fileSnapshot, blobPath, listener, writePriority); + } else { + logger.info("uploading file = {}", fileSnapshot.getName()); + uploadBlob(fileSnapshot, listener, blobPath, writePriority); + } } }); @@ -112,6 +236,11 @@ private void uploadBlob( ) { try { + Map metadata = null; + if (isObjectMetadataUploadSupported()) { + metadata = fileSnapshot.prepareFileMetadata(); + } + ChannelFactory channelFactory = FileChannel::open; long contentLength; try (FileChannel channel = channelFactory.open(fileSnapshot.getPath(), StandardOpenOption.READ)) { @@ -130,7 +259,8 @@ private void uploadBlob( writePriority, (size, position) -> new OffsetRangeFileInputStream(fileSnapshot.getPath(), size, position), Objects.requireNonNull(fileSnapshot.getChecksum()), - remoteIntegrityEnabled + remoteIntegrityEnabled, + metadata ); ActionListener completionListener = ActionListener.wrap(resp -> listener.onResponse(fileSnapshot), ex -> { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), ex); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java index dcec94edd694f..0f0aa640aadeb 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java @@ -19,9 +19,13 @@ import java.io.InputStream; import java.nio.channels.Channels; import java.nio.channels.FileChannel; +import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; /** @@ -108,11 +112,23 @@ public static class TransferFileSnapshot extends FileSnapshot { private final long primaryTerm; private Long checksum; - - public TransferFileSnapshot(Path path, long primaryTerm, Long checksum) throws IOException { + @Nullable + private Path metadataFilePath; + @Nullable + private Long metadataFileChecksum; + @Nullable + private String metadataFileName; + + public TransferFileSnapshot(Path path, long primaryTerm, Long checksum, Path metadataFilePath, Long metadataFileChecksum) + throws IOException { super(path); this.primaryTerm = primaryTerm; this.checksum = checksum; + Objects.requireNonNull(metadataFilePath); + Objects.requireNonNull(metadataFileChecksum); + this.metadataFilePath = metadataFilePath; + this.metadataFileChecksum = metadataFileChecksum; + this.metadataFileName = metadataFilePath.getFileName().toString(); } public TransferFileSnapshot(String name, byte[] content, long primaryTerm) throws IOException { @@ -120,6 +136,12 @@ public TransferFileSnapshot(String name, byte[] content, long primaryTerm) throw this.primaryTerm = primaryTerm; } + public TransferFileSnapshot(Path path, long primaryTerm, Long checksum) throws IOException { + super(path); + this.primaryTerm = 
primaryTerm; + this.checksum = checksum; + } + public Long getChecksum() { return checksum; } @@ -128,6 +150,56 @@ public long getPrimaryTerm() { return primaryTerm; } + public Path getMetadataFilePath() { + return metadataFilePath; + } + + public Long getMetadataFileChecksum() { + return metadataFileChecksum; + } + + public String getMetadataFileName() { + return metadataFileName; + } + + public Map<String, String> prepareFileMetadata() throws IOException { + if (!(this instanceof TranslogCheckpointFileSnapshot)) { + return null; + } + + String ckpAsString = provideCheckpointDataAsString(); + Long checkpointChecksum = metadataFileChecksum; + + assert checkpointChecksum != null : "checksum can not be null"; + + Map<String, String> metadata = new HashMap<>(); + metadata.put(TranslogFileSnapshot.CHECKPOINT_FILE_DATA_KEY, ckpAsString); + metadata.put(TranslogFileSnapshot.CHECKPOINT_FILE_CHECKSUM_KEY, checkpointChecksum.toString()); + return metadata; + } + + public String provideCheckpointDataAsString() throws IOException { + return buildCheckpointDataAsBase64String(metadataFilePath); + } + + static String buildCheckpointDataAsBase64String(Path checkpointFilePath) throws IOException { + long fileSize = Files.size(checkpointFilePath); + assert fileSize < 1500 : "checkpoint file size is more than 1.5KB, can't be stored as metadata"; + byte[] fileBytes = Files.readAllBytes(checkpointFilePath); + return Base64.getEncoder().encodeToString(fileBytes); + } + + public static byte[] convertBase64StringToCheckpointFileDataBytes(String base64CheckpointString) { + if (base64CheckpointString == null) { + return null; + } + return Base64.getDecoder().decode(base64CheckpointString); + } + @Override public int hashCode() { return Objects.hash(primaryTerm, super.hashCode()); @@ -153,16 +225,50 @@ public boolean equals(Object o) { public static final class TranslogFileSnapshot extends TransferFileSnapshot { private final long generation; + private Path checkpointFilePath; + private Long checkpointChecksum; + public final static String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; + public final static String CHECKPOINT_FILE_CHECKSUM_KEY = "ckp-checksum"; public TranslogFileSnapshot(long primaryTerm, long generation, Path path, Long checksum) throws IOException { super(path, primaryTerm, checksum); this.generation = generation; } + public void setCheckpointFilePath(Path checkpointFilePath) { + this.checkpointFilePath = checkpointFilePath; + } + + public void setCheckpointChecksum(Long checkpointChecksum) { + this.checkpointChecksum = checkpointChecksum; + } + + public String provideCheckpointDataAsString() throws IOException { + return buildCheckpointDataAsBase64String(checkpointFilePath); + } + + static String buildCheckpointDataAsBase64String(Path checkpointFilePath) throws IOException { + long fileSize = Files.size(checkpointFilePath); + assert fileSize < 1500 : "checkpoint file size is more than 1.5KB, can't be stored as metadata"; + byte[] fileBytes = Files.readAllBytes(checkpointFilePath); + return Base64.getEncoder().encodeToString(fileBytes); + } + + public static byte[] convertBase64StringToCheckpointFileDataBytes(String base64CheckpointString) { + if (base64CheckpointString == null) { + return null; + } + return Base64.getDecoder().decode(base64CheckpointString); + } + public long getGeneration() { return generation; } + public Long getCheckpointChecksum() { + return checkpointChecksum; + } + @Override public int hashCode() { return
Objects.hash(generation, super.hashCode()); @@ -223,4 +329,34 @@ public boolean equals(Object o) { return false; } } + + /** + * Single snapshot of combined translog.tlog and translog.ckp files that gets transferred + * + * @opensearch.internal + */ + public static final class TranslogCheckpointFileSnapshot extends TransferFileSnapshot { + + private final long generation; + private final long minTranslogGeneration; + + public TranslogCheckpointFileSnapshot( + Path tlogFilepath, + long primaryTerm, + Long tlogFilechecksum, + long generation, + long minTranslogGeneration, + Path ckpFilePath, + Long ckpFileChecksum + ) throws IOException { + super(tlogFilepath, primaryTerm, tlogFilechecksum, ckpFilePath, ckpFileChecksum); + this.minTranslogGeneration = minTranslogGeneration; + this.generation = generation; + } + + public long getGeneration() { + return generation; + } + + public long getMinTranslogGeneration() { + return minTranslogGeneration; + } + + } } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java index f3c17cdaa0054..e083430aaddcc 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java @@ -77,6 +77,7 @@ public void onSuccess(TransferFileSnapshot fileSnapshot) { } add(fileSnapshot.getName(), TransferState.SUCCESS); + add(fileSnapshot.getMetadataFileName(), TransferState.SUCCESS); } void add(String file, boolean success) { @@ -130,7 +131,7 @@ public Set<String> allUploaded() { /** * Represents the state of the upload operation */ - private enum TransferState { + enum TransferState { SUCCESS, FAILED; diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index 0894ebf500ebd..3fd3bb3d89d7e 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; /** * Interface for the translog transfer service responsible for interacting with a remote store @@ -147,4 +148,8 @@ void listAllInSortedOrderAsync( ActionListener<List<BlobMetadata>> listener ); + ConcurrentHashMap<String, FileTransferTracker.TransferState> getFileTransferTrackerCache(); + + boolean isObjectMetadataUploadSupported(); + } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java index ef34fd31a296b..6d427853d8f37 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java @@ -10,6 +10,7 @@ import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; +import org.opensearch.index.translog.transfer.FileSnapshot.TranslogCheckpointFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; import java.util.Set; @@ -39,4 +40,10 @@ public interface TransferSnapshot { * @return the translog transfer metadata */ TranslogTransferMetadata getTranslogTransferMetadata(); + + /** + * The single snapshot of the translog and checkpoint generational files + * @return the set of {@link
TranslogCheckpointFileSnapshot} + */ + Set getTranslogCheckpointFileSnapshots(); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index fb78731246a07..8acee8795344c 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -25,6 +25,7 @@ import static org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; +import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogCheckpointFileSnapshot; import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; /** @@ -35,6 +36,7 @@ public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Closeable { private final Set> translogCheckpointFileInfoTupleSet; + private final Set translogCheckpointFileSnapshotSet; private final int size; private final long generation; private final long primaryTerm; @@ -44,13 +46,30 @@ public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Clo TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size, String nodeId) { translogCheckpointFileInfoTupleSet = new HashSet<>(size); + translogCheckpointFileSnapshotSet = new HashSet<>(size); this.size = size; this.generation = generation; this.primaryTerm = primaryTerm; this.nodeId = nodeId; } - private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) { + private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) throws IOException { + // set checkpoint file path and checkpoint file checksum for a translog file + translogFileSnapshot.setCheckpointFilePath(checkPointFileSnapshot.getPath()); + translogFileSnapshot.setCheckpointChecksum(checkPointFileSnapshot.getChecksum()); + + translogCheckpointFileSnapshotSet.add( + new TranslogCheckpointFileSnapshot( + translogFileSnapshot.getPath(), + translogFileSnapshot.getPrimaryTerm(), + translogFileSnapshot.getChecksum(), + translogFileSnapshot.getGeneration(), + checkPointFileSnapshot.getMinTranslogGeneration(), + checkPointFileSnapshot.getPath(), + checkPointFileSnapshot.getChecksum() + ) + ); + translogCheckpointFileInfoTupleSet.add(Tuple.tuple(translogFileSnapshot, checkPointFileSnapshot)); assert translogFileSnapshot.getGeneration() == checkPointFileSnapshot.getGeneration(); } @@ -64,6 +83,11 @@ public Set getTranslogFileSnapshots() { return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v1).collect(Collectors.toSet()); } + @Override + public Set getTranslogCheckpointFileSnapshots() { + return new HashSet<>(translogCheckpointFileSnapshotSet); + } + @Override public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata( diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 1087244623b87..5c98b186671aa 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -16,6 +16,7 @@ import 
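// [Editor's sketch, not part of the patch] The snapshot classes above pair each translog
// generation's .tlog file with its .ckp file so the pair travels as a single upload (the
// checkpoint bytes ride along as object metadata). A minimal, self-contained rendering of
// that pairing step using only JDK types; the helper below is hypothetical and only mirrors
// what TranslogCheckpointTransferSnapshot#add does with real snapshot objects.

import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

class GenerationPairingSketch {
    /** Pairs translog and checkpoint paths that share a generation. */
    static Map<Long, Path[]> pairByGeneration(Map<Long, Path> tlogByGen, Map<Long, Path> ckpByGen) {
        Map<Long, Path[]> pairs = new HashMap<>();
        tlogByGen.forEach((generation, tlog) -> {
            Path ckp = ckpByGen.get(generation);
            // mirrors the generation assertion in TranslogCheckpointTransferSnapshot#add
            assert ckp != null : "every translog generation must have a matching checkpoint file";
            pairs.put(generation, new Path[] { tlog, ckp });
        });
        return pairs;
    }
}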
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index 1087244623b87..5c98b186671aa 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -16,6 +16,7 @@
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.FetchBlobResult;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
 import org.opensearch.common.io.VersionedCodecStreamWrapper;
 import org.opensearch.common.io.stream.BytesStreamOutput;
@@ -27,6 +28,7 @@
 import org.opensearch.index.remote.RemoteStoreUtils;
 import org.opensearch.index.remote.RemoteTranslogTransferTracker;
 import org.opensearch.index.translog.Translog;
+import org.opensearch.index.translog.TranslogCheckedContainer;
 import org.opensearch.index.translog.transfer.listener.TranslogTransferListener;
 import org.opensearch.indices.RemoteStoreSettings;
 import org.opensearch.threadpool.ThreadPool;
@@ -41,12 +43,16 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
 import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot.CHECKPOINT_FILE_CHECKSUM_KEY;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot.CHECKPOINT_FILE_DATA_KEY;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot.convertBase64StringToCheckpointFileDataBytes;
 
 /**
  * The class responsible for orchestrating the transfer of a {@link TransferSnapshot} via a {@link TransferService}
@@ -101,8 +107,8 @@ public ShardId getShardId() {
 
     public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTransferListener translogTransferListener)
         throws IOException {
-        List<Exception> exceptionList = new ArrayList<>(transferSnapshot.getTranslogTransferMetadata().getCount());
-        Set<TransferFileSnapshot> toUpload = new HashSet<>(transferSnapshot.getTranslogTransferMetadata().getCount());
+        List<Exception> exceptionList = new ArrayList<>(transferSnapshot.getTranslogTransferMetadata().getCount() / 2);
+        Set<TransferFileSnapshot> toUploadCombined = new HashSet<>(transferSnapshot.getTranslogTransferMetadata().getCount() / 2);
         long metadataBytesToUpload;
         long metadataUploadStartTime;
         long uploadStartTime;
@@ -110,18 +116,28 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
         long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis();
 
         try {
-            toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots()));
-            toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots())));
-            if (toUpload.isEmpty()) {
+
+            toUploadCombined.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogCheckpointFileSnapshots()));
+
+            if (toUploadCombined.isEmpty()) {
                 logger.trace("Nothing to upload for transfer");
                 return true;
             }
 
-            fileTransferTracker.recordBytesForFiles(toUpload);
+            fileTransferTracker.recordBytesForFiles(toUploadCombined);
+
             captureStatsBeforeUpload();
-            final CountDownLatch latch = new CountDownLatch(toUpload.size());
+
+            ConcurrentHashMap<String, FileTransferTracker.TransferState> transferServiceFileTrackerCache = transferService
+                .getFileTransferTrackerCache();
+
+            final CountDownLatch latch = new CountDownLatch(toUploadCombined.size());
             LatchedActionListener<TransferFileSnapshot> latchedActionListener = new LatchedActionListener<>(
-                ActionListener.wrap(fileTransferTracker::onSuccess, ex -> {
+                ActionListener.wrap(fileSnapshot -> {
+                    fileTransferTracker.onSuccess(fileSnapshot);
+                    transferServiceFileTrackerCache.remove(fileSnapshot.getName());
+                    transferServiceFileTrackerCache.remove(fileSnapshot.getMetadataFileName());
+                }, ex -> {
                     assert ex instanceof FileTransferException;
                     logger.error(
                         () -> new ParameterizedMessage(
@@ -138,7 +154,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
                 latch
             );
 
             Map<Long, BlobPath> blobPathMap = new HashMap<>();
-            toUpload.forEach(
+            toUploadCombined.forEach(
                 fileSnapshot -> blobPathMap.put(
                     fileSnapshot.getPrimaryTerm(),
                     remoteDataTransferPath.add(String.valueOf(fileSnapshot.getPrimaryTerm()))
@@ -149,7 +165,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
             // TODO: Ideally each file's upload start time should be when it is actually picked for upload
             // https://github.com/opensearch-project/OpenSearch/issues/9729
             fileTransferTracker.recordFileTransferStartTime(uploadStartTime);
-            transferService.uploadBlobs(toUpload, blobPathMap, latchedActionListener, WritePriority.HIGH);
+            transferService.uploadBlobs(toUploadCombined, blobPathMap, latchedActionListener, WritePriority.HIGH);
 
             try {
                 if (latch.await(remoteStoreSettings.getClusterRemoteTranslogTransferTimeout().millis(), TimeUnit.MILLISECONDS) == false) {
@@ -236,16 +252,60 @@ public boolean downloadTranslog(String primaryTerm, String generation, Path loca
             generation,
             location
         );
-        // Download Checkpoint file from remote to local FS
-        String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation));
-        downloadToFS(ckpFileName, location, primaryTerm);
-        // Download translog file from remote to local FS
+
+        // Download translog file with object metadata from remote to local FS
         String translogFilename = Translog.getFilename(Long.parseLong(generation));
-        downloadToFS(translogFilename, location, primaryTerm);
+        downloadTlogFileToFS(translogFilename, location, primaryTerm, generation);
         return true;
     }
 
-    private void downloadToFS(String fileName, Path location, String primaryTerm) throws IOException {
+    private void downloadTlogFileToFS(String fileName, Path location, String primaryTerm, String generation) throws IOException {
+        Path filePath = location.resolve(fileName);
+        // Here, we always override the existing file if present.
+        // We need to change this logic when we introduce incremental download
+        if (Files.exists(filePath)) {
+            Files.delete(filePath);
+        }
+
+        boolean downloadStatus = false;
+        long bytesToRead = 0, downloadStartTime = System.nanoTime();
+        Map<String, String> metadata;
+
+        try (
+            FetchBlobResult fetchBlobResult = transferService.downloadBlobWithMetadata(remoteDataTransferPath.add(primaryTerm), fileName)
+        ) {
+            InputStream inputStream = fetchBlobResult.getInputStream();
+            metadata = fetchBlobResult.getMetadata();
+
+            bytesToRead = inputStream.available();
+            Files.copy(inputStream, filePath);
+            downloadStatus = true;
+
+            logger.info("downloaded translog for fileName = {}, with metadata = {}", fileName, metadata);
+        } finally {
+            remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L);
+            if (downloadStatus) {
+                remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead);
+            }
+        }
+
+        // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync
+        fileTransferTracker.add(fileName, true);
+
+        try {
+            if (metadata == null || metadata.isEmpty()) {
+                logger.info("metadata is null. Download checkpoint file from remote store separately");
+                String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation));
+                downloadCkpFileToFS(ckpFileName, location, primaryTerm);
+            } else {
+                writeCkpFileFromMetadata(metadata, location, generation, fileName);
+            }
+        } catch (Exception e) {
+            throw new IOException("Failed to download translog file from remote", e);
+        }
+    }
+
+    private void downloadCkpFileToFS(String fileName, Path location, String primaryTerm) throws IOException {
         Path filePath = location.resolve(fileName);
         // Here, we always override the existing file if present.
         // We need to change this logic when we introduce incremental download
@@ -271,6 +331,73 @@ private void downloadToFS(String fileName, Path location, String primaryTerm) th
         fileTransferTracker.add(fileName, true);
     }
 
+    private void writeCkpFileFromMetadata(Map<String, String> metadata, Path location, String generation, String fileName)
+        throws IOException {
+
+        try {
+            String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation));
+            Path filePath = location.resolve(ckpFileName);
+
+            // Here, we always override the existing file if present.
+            if (Files.exists(filePath)) {
+                Files.delete(filePath);
+            }
+
+            String ckpDataBase64 = metadata.get(CHECKPOINT_FILE_DATA_KEY);
+            String checksumKeyValue = metadata.get(CHECKPOINT_FILE_CHECKSUM_KEY);
+            if (ckpDataBase64 == null) {
+                throw new IllegalStateException(
+                    "Checkpoint file data (ckp-data) key is expected but not found in metadata for file: " + fileName
+                );
+            }
+            if (checksumKeyValue == null) {
+                throw new IllegalStateException(
+                    "Checkpoint file checksum (ckp-checksum) key is expected but not found in metadata for file: " + fileName
+                );
+            }
+
+            byte[] ckpFileBytes = convertBase64StringToCheckpointFileDataBytes(ckpDataBase64);
+            Long remoteDataChecksum = Long.parseLong(checksumKeyValue);
+
+            TranslogCheckedContainer translogCheckedContainer = new TranslogCheckedContainer(ckpFileBytes);
+            Long currentDataChecksum = translogCheckedContainer.getChecksum();
+
+            if (currentDataChecksum.equals(remoteDataChecksum)) {
+                logger.debug(
+                    "Checksum verification successful. currentDataChecksum={}, remoteDataChecksum={}",
+                    currentDataChecksum,
+                    remoteDataChecksum
+                );
+            } else {
+                logger.warn(
+                    "Checksum verification failed. currentDataChecksum={}, remoteDataChecksum={}",
+                    currentDataChecksum,
+                    remoteDataChecksum
+                );
+                throw new RuntimeException(
+                    "Checksum verification failed for file: "
+                        + fileName
+                        + ". currentDataChecksum="
+                        + currentDataChecksum
+                        + ", remoteChecksum="
+                        + remoteDataChecksum
+                );
+            }
+
+            Files.write(filePath, ckpFileBytes);
+
+            // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync
+            fileTransferTracker.add(ckpFileName, true);
+            logger.info("Wrote checkpoint file for fileName: {}", fileName);
+        } catch (IOException e) {
+            logger.error("Error writing checkpoint file for file: {}", fileName);
+            throw e;
+        } catch (IllegalStateException e) {
+            logger.error("Error processing metadata for file: {}", fileName);
+            throw e;
+        }
+    }
+
     public TranslogTransferMetadata readMetadata() throws IOException {
         SetOnce<TranslogTransferMetadata> metadataSetOnce = new SetOnce<>();
         SetOnce<IOException> exceptionSetOnce = new SetOnce<>();
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index 7e2ea5a77cbfa..0187a9fb3b8ba 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -124,7 +124,6 @@
 import org.opensearch.index.query.QueryRewriteContext;
 import org.opensearch.index.recovery.RecoveryStats;
 import org.opensearch.index.refresh.RefreshStats;
-import org.opensearch.index.remote.RemoteStoreEnums.PathType;
 import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory;
 import org.opensearch.index.search.stats.SearchStats;
 import org.opensearch.index.seqno.RetentionLeaseStats;
@@ -306,18 +305,6 @@ public class IndicesService extends AbstractLifecycleComponent
         Property.Final
     );
 
-    /**
-     * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for
-     * remote store enabled cluster.
-     */
-    public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>(
-        "cluster.remote_store.index.path.prefix.type",
-        PathType.FIXED.toString(),
-        PathType::parseString,
-        Property.NodeScope,
-        Property.Dynamic
-    );
-
     /**
      * The node's settings.
      */
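// [Editor's sketch, not part of the patch] writeCkpFileFromMetadata above rebuilds translog.ckp
// from two object-metadata entries: the base64-encoded checkpoint bytes (ckp-data) and their
// checksum (ckp-checksum), both key names taken from the exception messages in the diff. A
// standalone version of that verify-then-write step; TranslogCheckedContainer is assumed here
// to compute a CRC32-style long checksum, so JDK CRC32 stands in for it.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Base64;
import java.util.Map;
import java.util.zip.CRC32;

class CkpFromMetadataSketch {
    static void writeCkp(Map<String, String> metadata, Path ckpFile) throws IOException {
        String data = metadata.get("ckp-data");
        String checksum = metadata.get("ckp-checksum");
        if (data == null || checksum == null) {
            throw new IOException("ckp-data / ckp-checksum missing from object metadata");
        }
        byte[] ckpBytes = Base64.getDecoder().decode(data);
        long remoteChecksum = Long.parseLong(checksum);
        CRC32 crc = new CRC32();
        crc.update(ckpBytes, 0, ckpBytes.length);
        if (crc.getValue() != remoteChecksum) {
            throw new IOException("checkpoint checksum mismatch: " + crc.getValue() + " != " + remoteChecksum);
        }
        Files.write(ckpFile, ckpBytes); // only write once the bytes verify
    }
}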
diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
index 7f2121093f8e8..e0a9f7a9e05c1 100644
--- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
+++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
@@ -8,6 +8,7 @@
 
 package org.opensearch.indices;
 
+import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
@@ -15,6 +16,7 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.index.IndexSettings;
+import org.opensearch.index.remote.RemoteStoreEnums;
 
 /**
  * Settings for remote store
@@ -65,12 +67,41 @@ public class RemoteStoreSettings {
         Property.Dynamic
     );
 
+    /**
+     * This setting is used to set the remote store blob store path type strategy. This setting is effective only for
+     * remote store enabled cluster.
+     */
+    @ExperimentalApi
+    public static final Setting<RemoteStoreEnums.PathType> CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>(
+        "cluster.remote_store.index.path.type",
+        RemoteStoreEnums.PathType.FIXED.toString(),
+        RemoteStoreEnums.PathType::parseString,
+        Property.NodeScope,
+        Property.Dynamic
+    );
+
+    /**
+     * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for
+     * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING}
+     * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}.
+     */
+    @ExperimentalApi
+    public static final Setting<RemoteStoreEnums.PathHashAlgorithm> CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>(
+        "cluster.remote_store.index.path.hash_algorithm",
+        RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(),
+        RemoteStoreEnums.PathHashAlgorithm::parseString,
+        Property.NodeScope,
+        Property.Dynamic
+    );
+
     private volatile TimeValue clusterRemoteTranslogBufferInterval;
     private volatile int minRemoteSegmentMetadataFiles;
     private volatile TimeValue clusterRemoteTranslogTransferTimeout;
+    private volatile RemoteStoreEnums.PathType pathType;
+    private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm;
 
     public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) {
-        this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings);
+        clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings);
         clusterSettings.addSettingsUpdateConsumer(
             CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING,
             this::setClusterRemoteTranslogBufferInterval
@@ -82,11 +113,17 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) {
             this::setMinRemoteSegmentMetadataFiles
         );
 
-        this.clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings);
+        clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings);
         clusterSettings.addSettingsUpdateConsumer(
             CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING,
             this::setClusterRemoteTranslogTransferTimeout
         );
+
+        pathType = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setPathType);
+
+        pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm);
     }
 
     public TimeValue getClusterRemoteTranslogBufferInterval() {
@@ -112,4 +149,22 @@ public TimeValue getClusterRemoteTranslogTransferTimeout() {
     private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) {
         this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout;
     }
+
+    @ExperimentalApi
+    public RemoteStoreEnums.PathType getPathType() {
+        return pathType;
+    }
+
+    @ExperimentalApi
+    public RemoteStoreEnums.PathHashAlgorithm getPathHashAlgorithm() {
+        return pathHashAlgorithm;
+    }
+
+    private void setPathType(RemoteStoreEnums.PathType pathType) {
+        this.pathType = pathType;
+    }
+
+    private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) {
+        this.pathHashAlgorithm = pathHashAlgorithm;
+    }
 }
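// [Editor's sketch, not part of the patch] The two settings above follow the standard
// dynamic-setting pattern: read the initial value from ClusterSettings, keep it in a volatile
// field, and register an update consumer so later cluster-settings changes take effect without
// a restart. The same pattern reduced to plain Java; a Consumer-of-Consumer stands in for
// ClusterSettings#addSettingsUpdateConsumer, and the enum values are only the ones named in the diff.

import java.util.function.Consumer;

class DynamicSettingPatternSketch {
    enum PathType { FIXED, HASHED_PREFIX, HASHED_INFIX }

    private volatile PathType pathType;

    DynamicSettingPatternSketch(PathType initial, Consumer<Consumer<PathType>> registerUpdateConsumer) {
        this.pathType = initial;                           // value read at node startup
        registerUpdateConsumer.accept(this::setPathType);  // invoked on each dynamic update
    }

    private void setPathType(PathType pathType) {
        this.pathType = pathType;
    }

    PathType getPathType() {
        return pathType; // volatile read: callers always observe the latest applied value
    }
}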
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index 7fa2b6c8ff497..a33fd71e21896 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -863,7 +863,8 @@ protected Node(
                 xContentRegistry,
                 systemIndices,
                 forbidPrivateIndexSettings,
-                awarenessReplicaBalance
+                awarenessReplicaBalance,
+                remoteStoreSettings
             );
             pluginsService.filterPlugins(Plugin.class)
                 .forEach(
diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
index c76ea71c0a094..cd8714f6b556a 100644
--- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
@@ -107,6 +107,7 @@
 import java.util.function.LongSupplier;
 
 import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
+import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS;
 
 /**
  * The main search context used during search phase
@@ -187,6 +188,7 @@ final class DefaultSearchContext extends SearchContext {
     private final Function<SearchSourceBuilder, InternalAggregation.ReduceContextBuilder> requestToAggReduceContextBuilder;
     private final boolean concurrentSearchSettingsEnabled;
     private final SetOnce<Boolean> requestShouldUseConcurrentSearch = new SetOnce<>();
+    private final int maxAggRewriteFilters;
 
     DefaultSearchContext(
         ReaderContext readerContext,
@@ -240,6 +242,8 @@ final class DefaultSearchContext extends SearchContext {
         queryBoost = request.indexBoost();
         this.lowLevelCancellation = lowLevelCancellation;
         this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder;
+
+        this.maxAggRewriteFilters = evaluateFilterRewriteSetting();
     }
 
     @Override
@@ -994,4 +998,16 @@ public boolean shouldUseTimeSeriesDescSortOptimization() {
             && sort.isSortOnTimeSeriesField()
             && sort.sort.getSort()[0].getReverse() == false;
     }
+
+    @Override
+    public int maxAggRewriteFilters() {
+        return maxAggRewriteFilters;
+    }
+
+    private int evaluateFilterRewriteSetting() {
+        if (clusterService != null) {
+            return clusterService.getClusterSettings().get(MAX_AGGREGATION_REWRITE_FILTERS);
+        }
+        return 0;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java
index f2328bea65eb1..6b3620e65a271 100644
--- a/server/src/main/java/org/opensearch/search/SearchService.java
+++ b/server/src/main/java/org/opensearch/search/SearchService.java
@@ -272,6 +272,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
         Property.NodeScope
     );
 
+    // value 0 means rewrite filters optimization in aggregations will be disabled
+    public static final Setting<Integer> MAX_AGGREGATION_REWRITE_FILTERS = Setting.intSetting(
+        "search.max_aggregation_rewrite_filters",
+        72,
+        0,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
     public static final int DEFAULT_SIZE = 10;
     public static final int DEFAULT_FROM = 0;
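// [Editor's sketch, not part of the patch] The new search.max_aggregation_rewrite_filters setting
// above (default 72, minimum 0) replaces the fixed MAX_NUM_FILTER_BUCKETS = 1024 cap used by
// FastFilterRewriteHelper below, and a value of 0 now disables the fast-filter rewrite entirely.
// The guard logic in isolation; maxFilters plays the role of context.maxAggRewriteFilters(), and
// the bucket counts in main are hypothetical.

class FilterRewriteGuardSketch {
    // Returns true when the rewrite may proceed for the requested number of filter buckets.
    static boolean canRewrite(int maxFilters, int requiredBuckets) {
        if (maxFilters == 0) return false;     // value 0 disables the optimization
        return requiredBuckets <= maxFilters;  // give up once the cap is exceeded
    }

    public static void main(String[] args) {
        assert canRewrite(72, 24);    // default cap admits, e.g., a daily histogram over ~3 weeks
        assert !canRewrite(72, 100);  // too many buckets: fall back to the regular collector
        assert !canRewrite(0, 1);     // operator disabled the rewrite
    }
}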
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java
index e587b7f169e5f..dde748bf0dc07 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java
@@ -63,7 +63,6 @@ private FastFilterRewriteHelper() {}
 
     private static final Logger logger = LogManager.getLogger(FastFilterRewriteHelper.class);
 
-    private static final int MAX_NUM_FILTER_BUCKETS = 1024;
     private static final Map<Class<?>, Function<Query, Query>> queryWrappers;
 
     // Initialize the wrapper map for unwrapping the query
@@ -186,8 +185,9 @@ private static Weight[] createFilterForAggregations(
         int bucketCount = 0;
         while (roundedLow <= fieldType.convertNanosToMillis(high)) {
             bucketCount++;
-            if (bucketCount > MAX_NUM_FILTER_BUCKETS) {
-                logger.debug("Max number of filters reached [{}], skip the fast filter optimization", MAX_NUM_FILTER_BUCKETS);
+            int maxNumFilterBuckets = context.maxAggRewriteFilters();
+            if (bucketCount > maxNumFilterBuckets) {
+                logger.debug("Max number of filters reached [{}], skip the fast filter optimization", maxNumFilterBuckets);
                 return null;
             }
             // Below rounding is needed as the interval could return in
@@ -254,6 +254,8 @@ public void setAggregationType(AggregationType aggregationType) {
     }
 
     public boolean isRewriteable(final Object parent, final int subAggLength) {
+        if (context.maxAggRewriteFilters() == 0) return false;
+
         boolean rewriteable = aggregationType.isRewriteable(parent, subAggLength);
         logger.debug("Fast filter rewriteable: {} for shard {}", rewriteable, context.indexShard().shardId());
         this.rewriteable = rewriteable;
diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java
index 8f6001dc06755..07f3616bbc138 100644
--- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java
+++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java
@@ -522,4 +522,8 @@ public String toString() {
     public abstract int getTargetMaxSliceCount();
 
     public abstract boolean shouldUseTimeSeriesDescSortOptimization();
+
+    public int maxAggRewriteFilters() {
+        return 0;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
index b79a6a88250f8..e6a6b747c2baf 100644
--- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
@@ -680,9 +680,9 @@ private Settings getOverrideSettingsInternal() {
         // We will use whatever replication strategy provided by user or from snapshot metadata unless
         // cluster is remote store enabled or user have restricted a specific replication type in the
         // cluster.
If cluster is undergoing remote store migration, replication strategy is strictly SEGMENT type - if (RemoteStoreNodeAttribute.isRemoteStoreAttributePresent(clusterService.getSettings()) == true - || clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING) == true - || RemoteStoreNodeService.isMigratingToRemoteStore(clusterSettings) == true) { + if (RemoteStoreNodeAttribute.isRemoteStoreAttributePresent(clusterService.getSettings()) + || clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING) + || RemoteStoreNodeService.isMigratingToRemoteStore(clusterSettings)) { MetadataCreateIndexService.updateReplicationStrategy( settingsBuilder, request.indexSettings(), diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index da9a8b928a779..50ffd7322544a 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -81,6 +81,7 @@ import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; @@ -738,7 +739,8 @@ public void testRolloverClusterState() throws Exception { null, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -876,7 +878,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { null, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -1054,7 +1057,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { null, new SystemIndices(emptyMap()), false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 538416e1137f5..b3c58164fccbb 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -16,11 +16,15 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; +import 
org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.EmptyClusterInfoService; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; @@ -28,14 +32,22 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -44,9 +56,12 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -59,7 +74,9 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.BrokenBarrierException; @@ -68,8 +85,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static 
org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.opensearch.test.VersionUtils.randomCompatibleVersion; +import static org.opensearch.test.VersionUtils.randomOpenSearchVersion; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -692,4 +716,198 @@ protected void masterOperation(Task task, Request request, ClusterState state, A assertFalse(retried.get()); assertFalse(exception.get()); } + + public void testDontAllowSwitchingToStrictCompatibilityModeForMixedCluster() { + Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + + // request to change cluster compatibility mode to STRICT + Settings currentCompatibilityModeSettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .build(); + Settings intendedCompatibilityModeSettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.STRICT) + .build(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + request.persistentSettings(intendedCompatibilityModeSettings); + + // mixed cluster (containing both remote and non-remote nodes) + DiscoveryNode nonRemoteNode1 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode1 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + getRemoteStoreNodeAttributes(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode1) + .localNodeId(nonRemoteNode1.getId()) + .add(remoteNode1) + .localNodeId(remoteNode1.getId()) + .build(); + + Metadata metadata = Metadata.builder().persistentSettings(currentCompatibilityModeSettings).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).nodes(discoveryNodes).build(); + AllocationService allocationService = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + TransportClusterUpdateSettingsAction transportClusterUpdateSettingsAction = new TransportClusterUpdateSettingsAction( + transportService, + clusterService, + threadPool, + allocationService, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + clusterService.getClusterSettings() + ); + + final SettingsException exception = expectThrows( + SettingsException.class, + () -> transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, clusterState) + ); + assertEquals( + "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes", + exception.getMessage() + ); + + DiscoveryNode nonRemoteNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + getRemoteStoreNodeAttributes(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + // cluster with only non-remote 
nodes
+        discoveryNodes = DiscoveryNodes.builder()
+            .add(nonRemoteNode1)
+            .localNodeId(nonRemoteNode1.getId())
+            .add(nonRemoteNode2)
+            .localNodeId(nonRemoteNode2.getId())
+            .build();
+        ClusterState sameTypeClusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).build();
+        transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameTypeClusterState);
+
+        // cluster with only remote nodes
+        discoveryNodes = DiscoveryNodes.builder()
+            .add(remoteNode1)
+            .localNodeId(remoteNode1.getId())
+            .add(remoteNode2)
+            .localNodeId(remoteNode2.getId())
+            .build();
+        sameTypeClusterState = ClusterState.builder(sameTypeClusterState).nodes(discoveryNodes).build();
+        transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameTypeClusterState);
+    }
+
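// [Editor's sketch, not part of the patch] Both tests in this file exercise the same validation
// rule: a request that flips the compatibility mode to STRICT must be rejected while remote and
// non-remote nodes coexist. The rule reduced to plain Java; boolean node flags stand in for the
// repository node attributes that validateCompatibilityModeSettingRequest inspects, and a plain
// IllegalStateException stands in for SettingsException.

import java.util.List;

class StrictModeValidationSketch {
    static void validateSwitchToStrict(List<Boolean> nodeIsRemote) {
        boolean anyRemote = nodeIsRemote.contains(Boolean.TRUE);
        boolean anyNonRemote = nodeIsRemote.contains(Boolean.FALSE);
        if (anyRemote && anyNonRemote) {
            throw new IllegalStateException(
                "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes"
            );
        }
        // homogeneous cluster (all remote or all non-remote): the switch is allowed
    }
}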
+    public void testDontAllowSwitchingCompatibilityModeForClusterWithMultipleVersions() {
+        Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build();
+        FeatureFlags.initializeFeatureFlags(nodeSettings);
+
+        // request to change cluster compatibility mode
+        boolean toStrictMode = randomBoolean();
+        Settings currentCompatibilityModeSettings = Settings.builder()
+            .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED)
+            .build();
+        Settings intendedCompatibilityModeSettings = Settings.builder()
+            .put(
+                REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(),
+                toStrictMode ? RemoteStoreNodeService.CompatibilityMode.STRICT : RemoteStoreNodeService.CompatibilityMode.MIXED
+            )
+            .build();
+        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
+        request.persistentSettings(intendedCompatibilityModeSettings);
+
+        // two different but compatible OpenSearch versions for the discovery nodes
+        final Version version1 = randomOpenSearchVersion(random());
+        final Version version2 = randomCompatibleVersion(random(), version1);
+
+        assert version1.equals(version2) == false : "current nodes in the cluster must be of different versions";
+        DiscoveryNode discoveryNode1 = new DiscoveryNode(
+            UUIDs.base64UUID(),
+            buildNewFakeTransportAddress(),
+            toStrictMode ? getRemoteStoreNodeAttributes() : Collections.emptyMap(),
+            DiscoveryNodeRole.BUILT_IN_ROLES,
+            version1
+        );
+        DiscoveryNode discoveryNode2 = new DiscoveryNode(
+            UUIDs.base64UUID(),
+            buildNewFakeTransportAddress(),
+            toStrictMode ? getRemoteStoreNodeAttributes() : Collections.emptyMap(),
+            DiscoveryNodeRole.BUILT_IN_ROLES,
+            version2 // not same as discoveryNode1
+        );
+
+        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
+            .add(discoveryNode1)
+            .localNodeId(discoveryNode1.getId())
+            .add(discoveryNode2)
+            .localNodeId(discoveryNode2.getId())
+            .build();
+
+        Metadata metadata = Metadata.builder().persistentSettings(currentCompatibilityModeSettings).build();
+
+        ClusterState differentVersionClusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .metadata(metadata)
+            .nodes(discoveryNodes)
+            .build();
+        AllocationService allocationService = new AllocationService(
+            new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())),
+            new TestGatewayAllocator(),
+            new BalancedShardsAllocator(Settings.EMPTY),
+            EmptyClusterInfoService.INSTANCE,
+            EmptySnapshotsInfoService.INSTANCE
+        );
+        TransportClusterUpdateSettingsAction transportClusterUpdateSettingsAction = new TransportClusterUpdateSettingsAction(
+            transportService,
+            clusterService,
+            threadPool,
+            allocationService,
+            new ActionFilters(Collections.emptySet()),
+            new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)),
+            clusterService.getClusterSettings()
+        );
+
+        // changing compatibility mode when all nodes are not of the same version
+        final SettingsException exception = expectThrows(
+            SettingsException.class,
+            () -> transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, differentVersionClusterState)
+        );
+        assertThat(
+            exception.getMessage(),
+            containsString("can not change the compatibility mode when all the nodes in cluster are not of the same version")
+        );
+
+        // changing compatibility mode when all nodes are of the same version
+        discoveryNode2 = new DiscoveryNode(
+            UUIDs.base64UUID(),
+            buildNewFakeTransportAddress(),
+            toStrictMode ? getRemoteStoreNodeAttributes() : Collections.emptyMap(),
+            DiscoveryNodeRole.BUILT_IN_ROLES,
+            version1 // same as discoveryNode1
+        );
+        discoveryNodes = DiscoveryNodes.builder()
+            .add(discoveryNode1)
+            .localNodeId(discoveryNode1.getId())
+            .add(discoveryNode2)
+            .localNodeId(discoveryNode2.getId())
+            .build();
+
+        ClusterState sameVersionClusterState = ClusterState.builder(differentVersionClusterState).nodes(discoveryNodes).build();
+        transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameVersionClusterState);
+    }
+
+    private Map<String, String> getRemoteStoreNodeAttributes() {
+        Map<String, String> remoteStoreNodeAttributes = new HashMap<>();
+        remoteStoreNodeAttributes.put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-segment-repo-1");
+        remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1");
+        return remoteStoreNodeAttributes;
+    }
 }
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index d3086de6ec89e..fad98a6609c3b 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -76,10 +76,12 @@
 import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
 import org.opensearch.index.remote.RemoteStoreEnums.PathType;
 import org.opensearch.index.translog.Translog;
+import org.opensearch.indices.DefaultRemoteStoreSettings;
 import org.opensearch.indices.IndexCreationException;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.InvalidAliasNameException;
 import org.opensearch.indices.InvalidIndexNameException;
+import org.opensearch.indices.RemoteStoreSettings;
 import org.opensearch.indices.ShardLimitValidator;
 import org.opensearch.indices.SystemIndexDescriptor;
 import org.opensearch.indices.SystemIndices;
@@ -702,7 +704,8 @@ public void testValidateIndexName() throws Exception {
             null,
             new SystemIndices(Collections.emptyMap()),
             false,
-            new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings())
+            new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()),
+            DefaultRemoteStoreSettings.INSTANCE
         );
         validateIndexName(
             checkerService,
@@ -788,7 +791,8 @@ public void testValidateDotIndex() {
             null,
             new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)),
             false,
-            new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings())
+            new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()),
+            DefaultRemoteStoreSettings.INSTANCE
         );
         // Check deprecations
         assertFalse(checkerService.validateDotIndex(".test2", false));
@@ -1213,7 +1217,8 @@ public void testvalidateIndexSettings() {
             null,
             new SystemIndices(Collections.emptyMap()),
             true,
-            new AwarenessReplicaBalance(settings, clusterService.getClusterSettings())
+            new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()),
+            DefaultRemoteStoreSettings.INSTANCE
         );
 
         List<String> validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty());
@@ -1332,7 +1337,8 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() {
             null,
             new SystemIndices(Collections.emptyMap()),
             true,
-            new AwarenessReplicaBalance(forceClusterSettingEnabled,
clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); // Use DOCUMENT replication type setting for index creation final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); @@ -1457,7 +1463,8 @@ public void testRemoteStoreDisabledByUserIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1491,7 +1498,8 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1530,7 +1538,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1711,7 +1720,7 @@ public void testRemoteCustomData() { validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathHashAlgorithm.NAME, - PathHashAlgorithm.FNV_1A.name() + PathHashAlgorithm.FNV_1A_COMPOSITE_1.name() ); } @@ -1720,7 +1729,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); @@ -1734,6 +1743,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType when(clusterService.getSettings()).thenReturn(settings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); when(clusterService.state()).thenReturn(clusterState); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); ThreadPool threadPool = new TestThreadPool(getTestName()); MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( @@ -1749,7 +1759,8 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), + remoteStoreSettings ); CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() @@ -1872,7 +1883,8 @@ public void testIndexLifecycleNameSetting() { null, new SystemIndices(Collections.emptyMap()), true, - new 
AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 0b8e64e31a523..0b99ffac67ee8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -55,6 +55,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; @@ -2051,7 +2052,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr xContentRegistry, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index d9662d1de9e0c..98a79f3ca38dc 100644 --- a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.is; public class DateUtilsTests extends OpenSearchTestCase { - private static final Set IGNORE = new HashSet<>(Arrays.asList("America/Ciudad_Juarez")); + private static final Set IGNORE = new HashSet<>(Arrays.asList("Antarctica/Vostok")); public void testTimezoneIds() { assertNull(DateUtils.dateTimeZoneToZoneId(null)); diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java index 868c2175d0689..c92ee191193c6 100644 --- a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java @@ -10,10 +10,12 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TestUtil; import java.util.TreeMap; +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") public class FuzzyFilterPostingsFormatTests extends BasePostingsFormatTestCase { private TreeMap params = new TreeMap<>() { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index fe5635063f783..575b397382f24 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ 
b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -25,7 +25,8 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -161,10 +162,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -178,7 +179,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); // Translog Metadata @@ -190,10 +191,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -204,7 +205,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. 
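// [Editor's sketch, not part of the patch] The FNV_1A_BASE64 assertions in this test hash the
// path input and prepend an 11-character URL-safe base64 rendering of the 64-bit FNV-1a hash
// (e.g. "DgSI70IciXs", "erwR-G735Uw" above). A standalone FNV-1a-64 plus base64 sketch; the
// constants are the standard FNV-1a parameters, while the exact composition of the hashed
// input string is an assumption here.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class Fnv1aPrefixSketch {
    static long fnv1a64(String input) {
        long hash = 0xcbf29ce484222325L;                    // FNV offset basis
        for (byte b : input.getBytes(StandardCharsets.UTF_8)) {
            hash ^= (b & 0xffL);
            hash *= 0x100000001b3L;                         // FNV prime
        }
        return hash;
    }

    static String base64Prefix(String input) {
        byte[] raw = ByteBuffer.allocate(Long.BYTES).putLong(fnv1a64(input)).array();
        // 8 hash bytes -> 11 base64 chars without padding, URL-safe to stay path-friendly
        return Base64.getUrlEncoder().withoutPadding().encodeToString(raw);
    }
}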
@@ -238,10 +239,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -252,7 +253,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); // Segment Metadata @@ -264,10 +265,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -278,7 +279,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); // Segment Lockfiles @@ -290,10 +291,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -304,10 +305,197 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); } + public void testGeneratePathForHashedPrefixTypeAndFNVCompositeHashAlgorithm() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, 
FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("D10000001001000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "o00101001010011/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", + result.buildAsString() + ); + + // Translog Lock files - This is a negative case where the assertion will trip. 
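The 15-character values asserted for FNV_1A_COMPOSITE_1 (such as "D10000001001000": one base64 character followed by 14 binary digits) are consistent with a composite encoding of the 64-bit hash in which the 6 most significant bits select one URL-base64 character and the next n-6 bits are printed as zero-padded binary. The production helper is RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding (exercised directly in RemoteStoreUtilsTests below); this standalone version is an illustration under that reading, not the production source.

    // Composite base64+binary encoding over the top `bits` bits of a long:
    // 6 bits -> one URL-base64 char, the remaining bits -> binary digits.
    static String compositeEncode(long value, int bits) {
        final String urlBase64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
        char head = urlBase64.charAt((int) (value >>> 58));           // 6 most significant bits
        int binaryBits = bits - 6;                                    // bits left to render in binary
        long tail = (value >>> (58 - binaryBits)) & ((1L << binaryBits) - 1);
        String binary = Long.toBinaryString(tail);
        return head + "0".repeat(binaryBits - binary.length()) + binary;
    }

As a spot check against the expectation table in RemoteStoreUtilsTests further down: for -5537941589147079860L the 20-bit expected value is "s11001001010100"; 's' sits at index 44 in the URL-base64 alphabet, i.e. 101100 in binary, and 101100 followed by 11001001010100 is exactly the top 20 bits of that hash.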
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("A01010000000101/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "e10101111000001/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", + result.buildAsString() + ); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "K01111001100000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", + result.buildAsString() + ); + } + public void testGeneratePathForHashedInfixType() { BlobPath blobPath = new BlobPath(); List pathList = getPathList(); @@ -330,7 +518,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = 
HASHED_INFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); String expected = derivePath(basePath, pathInput); String actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -346,7 +534,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -361,7 +549,7 @@ public void testGeneratePathForHashedInfixType() { .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -374,7 +562,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -410,7 +598,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -423,7 +611,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -437,7 +625,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -450,7 +638,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} 
actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -464,7 +652,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -477,7 +665,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -487,7 +675,7 @@ private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( SEPARATOR, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), @@ -496,7 +684,7 @@ private String derivePath(String basePath, PathInput pathInput) { : String.join( SEPARATOR, basePath, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 9d4b41f5c395f..d28ebc8c2e5da 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -11,35 +11,138 @@ import org.opensearch.Version; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { public void testGetMinVersionOlder() { - Settings settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) - .build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0); 
assertEquals(PathType.FIXED, resolver.get().getType()); assertNull(resolver.get().getHashAlgorithm()); } public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); if (pathType.requiresHashAlgorithm()) { assertNotNull(resolver.get().getHashAlgorithm()); } else { assertNull(resolver.get().getHashAlgorithm()); } + } + + public void testGetStrategy() { + // FIXED type + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // FIXED type with hash algorithm + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + 
.put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } + public void testGetStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java 
index 34074861f2764..4d3e633848975 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -14,13 +14,19 @@ import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.math.BigInteger; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; +import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -28,6 +34,16 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private static Map BASE64_CHARSET_IDX_MAP; + + static { + Map charToIndexMap = new HashMap<>(); + for (int i = 0; i < URL_BASE64_CHARSET.length; i++) { + charToIndexMap.put(URL_BASE64_CHARSET[i], i); + } + BASE64_CHARSET_IDX_MAP = Collections.unmodifiableMap(charToIndexMap); + } + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, @@ -205,8 +221,106 @@ public void testLongToBase64() { "6kv3yZNv9kY" ); for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + String base64Str = longToUrlBase64(entry.getKey()); + assertEquals(entry.getValue(), base64Str); assertEquals(11, entry.getValue().length()); + assertEquals((long) entry.getKey(), urlBase64ToLong(base64Str)); + } + + int iters = randomInt(100); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String base64Str = longToUrlBase64(value); + assertEquals(value, urlBase64ToLong(base64Str)); } } + + public void testLongToCompositeUrlBase64AndBinaryEncodingUsing20Bits() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s11001001010100", + -5878421770170594047L, + "r10011010111010", + -5147010836697060622L, + "u00100100100010", + 937096430362711837L, + "D01000000010011", + 8422273604115462710L, + "d00111000011110", + -2528761975013221124L, + "300111010000000", + -5512387536280560513L, + "s11100000000001", + -5749656451579835857L, + "s00001101010001", + 5569654857969679538L, + "T01010010110110", + -1563884000447039930L, + "610010010111111" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + String base64Str = RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(entry.getKey(), 20); + assertEquals(entry.getValue(), base64Str); + assertEquals(15, entry.getValue().length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), base64Str.charAt(0)); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(value, 20).charAt(0), longToUrlBase64(value).charAt(0)); + } + } + + public void testLongToCompositeUrlBase64AndBinaryEncoding() { + Map longToExpectedBase64String = 
Map.of( + -5537941589147079860L, + "s1100100101010001110111011101001000000001101010101101001100", + -5878421770170594047L, + "r1001101011101001101000101110010101000011110000110100000001", + -5147010836697060622L, + "u0010010010001001001110100111111111100101011110101011110010", + 937096430362711837L, + "D0100000001001111000011110100001100000011100101011100011101", + 8422273604115462710L, + "d0011100001111011010011100001000110011100110111101000110110", + -2528761975013221124L, + "30011101000000010000110000110110101110100100101110011111100", + -5512387536280560513L, + "s1110000000000100001011110111011011101101001101110001111111", + -5749656451579835857L, + "s0000110101000111011110101110010111000011010000101000101111", + 5569654857969679538L, + "T0101001011011000111001010110000010110011111011110010110010", + -1563884000447039930L, + "61001001011111101111100100110010011011011111111011001000110" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + Long hashValue = entry.getKey(); + String expectedCompositeEncoding = entry.getValue(); + String actualCompositeEncoding = longToCompositeBase64AndBinaryEncoding(hashValue, 64); + assertEquals(expectedCompositeEncoding, actualCompositeEncoding); + assertEquals(59, expectedCompositeEncoding.length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), actualCompositeEncoding.charAt(0)); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(hashValue, 20), actualCompositeEncoding.substring(0, 15)); + + Long computedHashValue = compositeUrlBase64BinaryEncodingToLong(actualCompositeEncoding); + assertEquals(hashValue, computedHashValue); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String compositeEncoding = longToCompositeBase64AndBinaryEncoding(value, 64); + assertEquals(value, compositeUrlBase64BinaryEncodingToLong(compositeEncoding)); + } + } + + static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { + char ch = encodedValue.charAt(0); + int base64BitsIntValue = BASE64_CHARSET_IDX_MAP.get(ch); + String base64PartBinary = Integer.toBinaryString(base64BitsIntValue); + String binaryString = base64PartBinary + encodedValue.substring(1); + return new BigInteger(binaryString, 2).longValue(); + } } diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index e3259a3097278..e81eef67d6704 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -104,7 +104,7 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - // Case 3 - with just hashed prefix type and hash algorithm + // Case 3 - with just hashed prefix type and FNV_1A_BASE64 hash algorithm shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( snapshot, indexVersion, @@ -119,7 +119,7 @@ public void testToXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 ); try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { 
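+ // The expected JSON in these cases pins stable wire codes rather than enum names:
+ // path_type: 0 = FIXED, 1 = HASHED_PREFIX, 2 = HASHED_INFIX
+ // path_hash_algorithm: 0 = FNV_1A_BASE64, 1 = FNV_1A_COMPOSITE_1
+ // Serializing a fixed code keeps older snapshots readable even if enum constants are later renamed or reordered.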
builder.startObject(); @@ -134,6 +134,99 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + ",\"path_hash_algorithm\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 4 - with just hashed prefix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 5 - with just hashed infix type and FNV_1A_BASE64 hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":0}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 6 - with just hashed infix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + 
"\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; } public void testFromXContent() throws IOException { @@ -223,7 +316,88 @@ public void testFromXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_PREFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":0}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE + xContent = 
"{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 ); try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); @@ -232,7 +406,7 @@ public void testFromXContent() throws IOException { } public void testFromXContentInvalid() throws IOException { - final int iters = 14; + final int iters = 18; for (int iter = 0; iter < iters; iter++) { String snapshot = "test-snapshot"; long indexVersion = 1; @@ -296,21 +470,47 @@ public void testFromXContentInvalid() throws IOException { break; case 10: version = "1"; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; - failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_BASE64 for version=1"; break; case 11: version = "2"; pathType = PathType.FIXED; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; - failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_BASE64 for version=2"; break; case 12: + version = "1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=1"; + break; + case 13: + version = "2"; + pathType = PathType.FIXED; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=2"; + break; + case 14: version = "2"; pathType = PathType.HASHED_PREFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; break; - case 13: + case 15: + version = "2"; + pathType = PathType.HASHED_PREFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + break; + case 16: + version = "2"; + pathType = PathType.HASHED_INFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + break; + case 17: + version = "2"; + pathType = PathType.HASHED_INFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + break; + case 18: break; default: fail("shouldn't be here"); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 44ddd2de9d007..b1e2028d761f0 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -706,7 +706,7 @@ public 
void testCleanupAsync() throws Exception { ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); RemoteStorePathStrategy pathStrategy = randomFrom( new RemoteStorePathStrategy(PathType.FIXED), - new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A) + new RemoteStorePathStrategy(randomFrom(PathType.HASHED_INFIX, PathType.HASHED_PREFIX), randomFrom(PathHashAlgorithm.values())) ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 0e3854f65135f..28979a3dc4f28 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -1716,6 +1716,82 @@ public void testDownloadWithRetries() throws IOException { RemoteFsTranslog.download(mockTransfer, location, logger); } + // No translog data in local as well as remote, we skip creating empty translog + public void testDownloadWithNoTranslogInLocalAndRemote() throws IOException { + Path location = createTempDir(); + + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path[] filesBeforeDownload = FileSystemUtils.files(location); + RemoteFsTranslog.download(mockTransfer, location, logger); + assertEquals(filesBeforeDownload, FileSystemUtils.files(location)); + } + + // No translog data in remote but non-empty translog is present in local. 
In this case, we delete all the files + // from local file system and create empty translog + public void testDownloadWithTranslogOnlyInLocal() throws IOException { + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path location = createTempDir(); + for (Path file : FileSystemUtils.files(translogDir)) { + Files.copy(file, location.resolve(file.getFileName())); + } + + Checkpoint existingCheckpoint = Translog.readCheckpoint(location); + + TranslogTransferManager finalMockTransfer = mockTransfer; + RemoteFsTranslog.download(finalMockTransfer, location, logger); + + Path[] filesPostDownload = FileSystemUtils.files(location); + assertEquals(2, filesPostDownload.length); + assertTrue( + filesPostDownload[0].getFileName().toString().contains("translog.ckp") + || filesPostDownload[1].getFileName().toString().contains("translog.ckp") + ); + + Checkpoint newEmptyTranslogCheckpoint = Translog.readCheckpoint(location); + // Verify that the new checkpoint points to empty translog + assertTrue( + newEmptyTranslogCheckpoint.generation == newEmptyTranslogCheckpoint.minTranslogGeneration + && newEmptyTranslogCheckpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && newEmptyTranslogCheckpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && newEmptyTranslogCheckpoint.numOps == 0 + ); + assertTrue(newEmptyTranslogCheckpoint.generation > existingCheckpoint.generation); + assertEquals(newEmptyTranslogCheckpoint.globalCheckpoint, existingCheckpoint.globalCheckpoint); + } + + // No translog data in remote and empty translog in local. 
We skip creating another empty translog + public void testDownloadWithEmptyTranslogOnlyInLocal() throws IOException { + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path location = createTempDir(); + for (Path file : FileSystemUtils.files(translogDir)) { + Files.copy(file, location.resolve(file.getFileName())); + } + + TranslogTransferManager finalMockTransfer = mockTransfer; + + // download first time will ensure creating empty translog + RemoteFsTranslog.download(finalMockTransfer, location, logger); + Path[] filesPostFirstDownload = FileSystemUtils.files(location); + + // download on empty translog should be a no-op + RemoteFsTranslog.download(finalMockTransfer, location, logger); + Path[] filesPostSecondDownload = FileSystemUtils.files(location); + + assertArrayEquals(filesPostFirstDownload, filesPostSecondDownload); + } + public class ThrowingBlobRepository extends FsRepository { private final Environment environment; diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java index 2d75851e888a5..3dabc056f212d 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java @@ -8,12 +8,17 @@ package org.opensearch.index.translog.transfer; +import org.opensearch.index.translog.Translog; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Base64; public class FileSnapshotTests extends OpenSearchTestCase { @@ -69,6 +74,49 @@ public void testFileSnapshotContent() throws IOException { } } + public void testBuildCheckpointDataAsBase64String() throws IOException { + Path file = createTempFile(Translog.TRANSLOG_FILE_PREFIX + 10, Translog.CHECKPOINT_SUFFIX); + Files.writeString(file, "hello_world_with_checkpoint_file_data"); + Files.writeString(file, "hello_world_with_checkpoint_file_data-2"); + Files.writeString(file, "hello_world_with_checkpoint_file_data-4"); + Files.writeString(file, "213123123"); + + String encodedString = FileSnapshot.TranslogFileSnapshot.buildCheckpointDataAsBase64String(file); + + // Assert + assertNotNull(encodedString); + byte[] decoded = Base64.getDecoder().decode(encodedString); + assertArrayEquals(Files.readAllBytes(file), decoded); + } + + public void testBuildCheckpointDataAsBase64StringWhenPathIsNull() throws IOException { + Path file = createTempFile(Translog.TRANSLOG_FILE_PREFIX + 10, Translog.CHECKPOINT_SUFFIX); + Files.writeString(file, "hello_world_with_checkpoint_file_data"); + + assertThrows(NullPointerException.class, () -> FileSnapshot.TranslogFileSnapshot.buildCheckpointDataAsBase64String(null)); + } + + public void testConvertCheckpointBase64StringToBytes() throws IOException { + Path file = createTempFile(Translog.TRANSLOG_FILE_PREFIX + 10, Translog.CHECKPOINT_SUFFIX); + Files.writeString(file, "test-hello_world_with_checkpoint_file_data"); + + String encodedString = 
FileSnapshot.TranslogFileSnapshot.buildCheckpointDataAsBase64String(file); + + byte[] decodedBytes = FileSnapshot.TranslogFileSnapshot.convertBase64StringToCheckpointFileDataBytes(encodedString); + assertNotNull(encodedString); + assertArrayEquals("test-hello_world_with_checkpoint_file_data".getBytes(StandardCharsets.UTF_8), decodedBytes); + } + + public void testBuildCheckpointDataAsBase64String_whenFileSizeGreaterThan2KB_shouldThrowAssertionError() throws IOException { + Path file = createTempFile(Translog.TRANSLOG_FILE_PREFIX + 10, Translog.CHECKPOINT_SUFFIX); + byte[] data = new byte[2048]; // 2KB + + ByteBuffer buffer = ByteBuffer.wrap(data); + Files.write(file, buffer.array(), StandardOpenOption.WRITE); + + assertThrows(AssertionError.class, () -> FileSnapshot.TranslogFileSnapshot.buildCheckpointDataAsBase64String(file)); + } + private void assertFileSnapshotProperties(Path file) throws IOException { assertEquals(file.getFileName().toString(), fileSnapshot.getName()); assertEquals(Files.size(file), fileSnapshot.getContentLength()); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 81ae479d018b0..dcbdbc6acef5f 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -15,6 +15,7 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.FetchBlobResult; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; @@ -42,6 +43,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Objects; @@ -117,6 +119,16 @@ public void setUp() throws Exception { Thread.sleep(delayForBlobDownload); return new ByteArrayInputStream(ckpBytes); }); + + when(transferService.downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new FetchBlobResult(new ByteArrayInputStream(tlogBytes), null); + }); + + when(transferService.downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.ckp"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new FetchBlobResult(new ByteArrayInputStream(ckpBytes), null); + }); } @Override @@ -350,6 +362,11 @@ public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); } + @Override + public Set getTranslogCheckpointFileSnapshots() { + return new HashSet<>(); + } + @Override public String toString() { return "test-to-string"; @@ -460,7 +477,7 @@ public void testDownloadTranslogAlreadyExists() throws IOException { translogTransferManager.downloadTranslog("12", "23", location); - verify(transferService).downloadBlob(any(BlobPath.class), eq("translog-23.tlog")); + verify(transferService).downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog")); verify(transferService).downloadBlob(any(BlobPath.class), eq("translog-23.ckp")); 
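The FileSnapshot and TranslogTransferManager changes appear to fit together: the checkpoint file is small enough (guarded at 2KB) to be base64-encoded and attached as object metadata on the translog blob, so a single downloadBlobWithMetadata call can recover both files, while the separate translog-23.ckp download verified above remains the path taken when the FetchBlobResult carries no metadata. The sketch below covers the encode/decode halves consistently with the FileSnapshotTests assertions; the production methods are TranslogFileSnapshot.buildCheckpointDataAsBase64String and convertBase64StringToCheckpointFileDataBytes, and this standalone version is illustrative only.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Base64;

    // Inline a small checkpoint file as base64 so it can travel as blob
    // metadata alongside the translog object.
    static String buildCheckpointDataAsBase64String(Path ckpFile) throws IOException {
        byte[] ckpBytes = Files.readAllBytes(ckpFile);   // throws NPE for a null path, as the test expects
        // The 2KB test case expects an AssertionError, so the size guard is an assert.
        assert ckpBytes.length < 2048 : "checkpoint file too large to inline: " + ckpBytes.length;
        return Base64.getEncoder().encodeToString(ckpBytes);
    }

    static byte[] convertBase64StringToCheckpointFileDataBytes(String encoded) {
        return Base64.getDecoder().decode(encoded);
    }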
assertTrue(Files.exists(location.resolve("translog-23.tlog"))); assertTrue(Files.exists(location.resolve("translog-23.ckp"))); @@ -475,7 +492,7 @@ public void testDownloadTranslogWithTrackerUpdated() throws IOException { translogTransferManager.downloadTranslog("12", "23", location); - verify(transferService).downloadBlob(any(BlobPath.class), eq(translogFile)); + verify(transferService).downloadBlobWithMetadata(any(BlobPath.class), eq(translogFile)); verify(transferService).downloadBlob(any(BlobPath.class), eq(checkpointFile)); assertTrue(Files.exists(location.resolve(translogFile))); assertTrue(Files.exists(location.resolve(checkpointFile))); diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index dc4dca80ea110..17bd821ed0c8c 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -105,6 +105,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; @@ -312,7 +313,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m xContentRegistry, systemIndices, true, - awarenessReplicaBalance + awarenessReplicaBalance, + DefaultRemoteStoreSettings.INSTANCE ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index f06d5595afcd5..1faaa16ce5628 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -262,8 +262,12 @@ public void testAlreadyOnNewCheckpoint() { } @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") - public void testShardAlreadyReplicating() { + public void testShardAlreadyReplicating() throws InterruptedException { + // in this case shard is already replicating and we receive an ahead checkpoint with same pterm. + // ongoing replication is not cancelled and new one does not start. 
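The rewritten test replaces a single blocking latch plus a stubbed-out checkpoint processor with a three-latch handshake, which removes the raciness: the stubbed source announces when it is parked inside getCheckpointMetadata, the test makes its assertions against the in-flight replication, then releases the source and waits for completion. The pattern in isolation:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    // Generic three-latch handshake for deterministic concurrency tests.
    static void handshakeDemo() throws InterruptedException {
        CountDownLatch reachedBlockingPoint = new CountDownLatch(1);
        CountDownLatch allowedToProceed = new CountDownLatch(1);
        CountDownLatch workDone = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            reachedBlockingPoint.countDown();     // 1. announce: we are at the interesting point
            try {
                allowedToProceed.await();         // 2. park until the test has asserted
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            workDone.countDown();                 // 3. announce completion
        });
        worker.start();

        reachedBlockingPoint.await(5, TimeUnit.MINUTES);
        // ... assertions against the in-progress state belong here ...
        allowedToProceed.countDown();
        workDone.await(5, TimeUnit.MINUTES);
    }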
CountDownLatch blockGetCheckpointMetadata = new CountDownLatch(1); + CountDownLatch continueGetCheckpointMetadata = new CountDownLatch(1); + CountDownLatch replicationCompleteLatch = new CountDownLatch(1); SegmentReplicationSource source = new TestReplicationSource() { @Override public void getCheckpointMetadata( @@ -272,11 +276,13 @@ public void getCheckpointMetadata( ActionListener listener ) { try { - blockGetCheckpointMetadata.await(); - final CopyState copyState = new CopyState(primaryShard); - listener.onResponse( - new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) - ); + blockGetCheckpointMetadata.countDown(); + continueGetCheckpointMetadata.await(); + try (final CopyState copyState = new CopyState(primaryShard)) { + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + } } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } @@ -297,24 +303,73 @@ public void getSegmentFiles( final SegmentReplicationTarget target = spy( new SegmentReplicationTarget( replicaShard, - primaryShard.getLatestReplicationCheckpoint(), + initialCheckpoint, source, - mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + replicationCompleteLatch.countDown(); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + Assert.fail("Replication should not fail"); + } + } ) ); final SegmentReplicationTargetService spy = spy(sut); - doReturn(false).when(spy).processLatestReceivedCheckpoint(eq(replicaShard), any()); // Start first round of segment replication. 
spy.startReplication(target); + // wait until we are at getCheckpointMetadata stage + blockGetCheckpointMetadata.await(5, TimeUnit.MINUTES); - // Start second round of segment replication, this should fail to start as first round is still in-progress - spy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); - verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); - blockGetCheckpointMetadata.countDown(); + // try and insert a new target directly - it should fail immediately and alert listener + spy.startReplication( + new SegmentReplicationTarget( + replicaShard, + aheadCheckpoint, + source, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail("Should not succeed"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + assertFalse(sendShardFailure); + assertEquals("Shard " + replicaShard.shardId() + " is already replicating", e.getMessage()); + } + } + ) + ); + + // Start second round of segment replication through onNewCheckpoint, this should fail to start as first round is still in-progress + // aheadCheckpoint is of same pterm but higher version + assertTrue(replicaShard.shouldProcessCheckpoint(aheadCheckpoint)); + spy.onNewCheckpoint(aheadCheckpoint, replicaShard); + verify(spy, times(0)).processLatestReceivedCheckpoint(eq(replicaShard), any()); + // start replication is not invoked with aheadCheckpoint + verify(spy, times(0)).startReplication( + eq(replicaShard), + eq(aheadCheckpoint), + any(SegmentReplicationTargetService.SegmentReplicationListener.class) + ); + continueGetCheckpointMetadata.countDown(); + replicationCompleteLatch.await(5, TimeUnit.MINUTES); } - public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws InterruptedException { + public void testShardAlreadyReplicating_HigherPrimaryTermReceived() throws InterruptedException { // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it. 
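+ // Together the two tests pin the intended policy: a checkpoint that is ahead on version but
+ // carries the same primary term must not cancel or restart an in-flight replication (a duplicate
+ // target fails fast with "... is already replicating"), whereas a checkpoint bearing a higher
+ // primary term - the case covered here - may cancel the ongoing round and start a new one.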
SegmentReplicationTargetService serviceSpy = spy(sut); doNothing().when(serviceSpy).updateVisibleCheckpoint(anyLong(), any()); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4326e5fc63961..95a343f3b4025 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2163,7 +2163,8 @@ public void onFailure(final Exception e) { namedXContentRegistry, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); actions.put( CreateIndexAction.INSTANCE, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c26c3f8d21380..286f0a1d91b4c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -142,6 +142,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.IndicesStore; import org.opensearch.monitor.os.OsInfo; @@ -211,7 +212,6 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2096,8 +2096,7 @@ protected boolean addMockGeoShapeFieldMapper() { * @return boolean. 
*/ protected boolean addMockTelemetryPlugin() { - // setting to false until https://github.com/opensearch-project/OpenSearch/issues/12615 is resolved - return false; + return true; } /** @@ -2619,7 +2618,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } - settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java index f7ebb3ee18a9b..4e72caeea584e 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java @@ -8,6 +8,7 @@ package org.opensearch.test.telemetry.tracing; +import org.opensearch.common.Booleans; import org.opensearch.telemetry.tracing.Span; import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId; @@ -29,6 +30,14 @@ public StrictCheckSpanProcessor() {} private static Map spanMap = new ConcurrentHashMap<>(); + // If you want to see the stack trace for each spanData, then + // update the flag to true or set the corresponding system property to true + // This is helpful in debugging the tests. Default value is false. + // Note: Enabling this might lead to OOM issues while running ITs. + private static final boolean isStackTraceForSpanEnabled = Booleans.parseBoolean( + System.getProperty("tests.telemetry.span.stack_traces", "false") + ); + @Override public void onStart(Span span) { spanMap.put(span.getSpanId(), toMockSpanData(span)); @@ -53,6 +62,7 @@ public List getFinishedSpanItems() { private MockSpanData toMockSpanData(Span span) { String parentSpanId = (span.getParentSpan() != null) ? span.getParentSpan().getSpanId() : ""; + StackTraceElement[] stackTrace = isStackTraceForSpanEnabled ? Thread.currentThread().getStackTrace() : null; MockSpanData spanData = new MockSpanData( span.getSpanId(), parentSpanId, @@ -60,7 +70,7 @@ private MockSpanData toMockSpanData(Span span) { System.nanoTime(), false, span.getSpanName(), - Thread.currentThread().getStackTrace(), + stackTrace, (span instanceof MockSpan) ? ((MockSpan) span).getAttributes() : Map.of() ); return spanData;
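A practical note on the StrictCheckSpanProcessor change: capturing a stack trace for every span is memory-hungry (hence the OOM warning in the comment), so it is now opt-in and read once from the tests.telemetry.span.stack_traces system property, defaulting to false. When debugging a span leak flagged by AllSpansAreEndedProperly, the traces can be re-enabled for a single run by passing -Dtests.telemetry.span.stack_traces=true to the test invocation, assuming the build forwards tests.* system properties to the test JVMs as OpenSearch's Gradle test tasks conventionally do.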