diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 771bfe694b698..980e08c696d54 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -36,4 +36,5 @@ BWC_VERSION: - "2.15.0" - "2.15.1" - "2.16.0" + - "2.16.1" - "2.17.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1aefeee710f47..e75e22f30431d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,17 +11,27 @@ # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file. # Default ownership for all repo files -* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/transport-netty4/ @peternied /plugins/identity-shiro/ @peternied +/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah + /server/src/main/java/org/opensearch/extensions/ @peternied /server/src/main/java/org/opensearch/identity/ @peternied -/server/src/main/java/org/opensearch/threadpool/ @peternied +/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied /server/src/main/java/org/opensearch/transport/ @peternied -/.github/ @peternied +/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami 
@VachaShah + +/.github/ @jed326 @peternied /MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 5b44198cd3b8e..8f4bad040fe44 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -14,7 +14,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, "id_2": { "description": "Indexing only configuration for HTTP_LOGS workload", @@ -30,7 +31,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, "id_3": { "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-3.0.0", @@ -46,7 +48,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_4": { "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0", @@ -62,10 +65,11 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_5": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0", + "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0", "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", @@ -78,7 +82,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_6": { "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x", @@ -94,7 +99,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_7": { "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", @@ -110,10 +116,11 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_8": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", + "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-2.x", "supported_major_versions": ["2"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", @@ -126,7 +133,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_9": { "description": "Indexing and search configuration for pmc workload", @@ -141,7 +149,8 @@ "cluster_configuration": { "size": "Single-Node", 
"data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, "id_10": { "description": "Indexing only configuration for stack-overflow workload", @@ -156,6 +165,7 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" } } diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 2a54c2072de59..c494df6e27ce3 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -60,6 +60,9 @@ jobs: for (const [key, value] of Object.entries(clusterBenchmarkConfigs)) { core.exportVariable(key, value); } + if (benchmarkConfigs[configId].hasOwnProperty('baseline_cluster_config')) { + core.exportVariable('BASELINE_CLUSTER_CONFIG', benchmarkConfigs[configId]['baseline_cluster_config']); + } - name: Post invalid format comment if: steps.check_comment.outputs.invalid == 'true' uses: actions/github-script@v7 @@ -105,13 +108,25 @@ jobs: echo "prHeadRepo=$headRepo" >> $GITHUB_ENV echo "prHeadRefSha=$headRefSha" >> $GITHUB_ENV - id: get_approvers - run: | - echo "approvers=$(cat .github/CODEOWNERS | grep '^\*' | tr -d '* ' | sed 's/@/,/g' | sed 's/,//1')" >> $GITHUB_OUTPUT + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + result-encoding: string + script: | + // Get the collaborators - filtered to maintainer permissions + const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', { + owner: context.repo.owner, + repo: context.repo.repo, + permission: 'maintain', + affiliation: 'all', + per_page: 100 + }); + return maintainersResponse.data.map(item => item.login).join(', '); - uses: trstringer/manual-approval@v1 - if: (!contains(steps.get_approvers.outputs.approvers, github.event.comment.user.login)) + if: (!contains(steps.get_approvers.outputs.result, github.event.comment.user.login)) with: secret: ${{ github.TOKEN }} - approvers: ${{ steps.get_approvers.outputs.approvers }} + approvers: ${{ steps.get_approvers.outputs.result }} minimum-approvals: 1 issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}' issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}" @@ -126,6 +141,7 @@ jobs: uses: actions/setup-java@v4 with: java-version: 21 + distribution: 'temurin' - name: Build and Assemble OpenSearch from PR run: | ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index cf9343c2c3aac..cd0415119282c 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -27,18 +27,29 @@ jobs: continue-on-error: true - run: | # The check was possibly skipped leading to success for both the jobs + has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} + has_breaking_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), '>breaking')}} + if [[ $has_breaking_label == true && $has_backport_label == true ]]; then + echo "error: Please make sure that the PR does not have a backport label associated with it when making breaking changes" + exit 1 + fi + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && ${{ steps.verify-changelog.outcome }} == 'success' 
]]; then exit 0 fi - + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then echo "error: Please ensure a changelog entry exists in CHANGELOG.md or CHANGELOG-3.0.md" exit 1 fi - + # Concatenates the labels and checks if the string contains "backport" - has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} if [[ ${{ steps.verify-changelog.outcome }} == 'success' && $has_backport_label == false ]]; then echo "error: Please make sure that the PR has a backport label associated with it when making an entry to the CHANGELOG.md file" exit 1 fi + + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && $has_backport_label == true ]]; then echo "error: Please make sure that the PR does not have a backport label associated with it when making an entry to the CHANGELOG-3.0.md file" exit 1 fi diff --git a/CHANGELOG.md b/CHANGELOG.md index a6ee67a5fe11d..3d2fed3896c81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,28 +5,38 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- [Offline Nodes] Adds offline-tasks library containing various interfaces to be used for Offline Background Tasks. ([#13574](https://github.com/opensearch-project/OpenSearch/pull/13574)) - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs ([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([#14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) - [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) +- [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680)) - Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) - Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) - [Concurrent Segment Search] Support composite aggregations with scripting ([#15072](https://github.com/opensearch-project/OpenSearch/pull/15072)) - Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) +- Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054)) +- [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) +- Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) -- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0
([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) +- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.16.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861), [#15205](https://github.com/opensearch-project/OpenSearch/pull/15205)) - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) - Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) - Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119)) +- Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.8 and `org.bouncycastle:bc-fips` from 1.0.2.5 to 2.0.0 in /distribution/tools/plugin-cli ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103)) +- Bump `com.azure:azure-core` from 1.49.1 to 1.51.0 ([#15111](https://github.com/opensearch-project/OpenSearch/pull/15111)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.5 to 1.1.10.6 ([#15207](https://github.com/opensearch-project/OpenSearch/pull/15207)) +- Bump `com.azure:azure-xml` from 1.0.0 to 1.1.0 ([#15206](https://github.com/opensearch-project/OpenSearch/pull/15206)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) +- Replace and block usages of org.apache.logging.log4j.util.Strings ([#15238](https://github.com/opensearch-project/OpenSearch/pull/15238)) - Updated `termsQuery` in `KeywordField` to set custom rewrite_override before executing query ### Deprecated @@ -35,8 +45,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908)) +- Fix NPE when bulk ingest with empty pipeline ([#15033](https://github.com/opensearch-project/OpenSearch/pull/15033)) - Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) - Fix delete index template failed when the index template matches a data stream but is unused ([#15080](https://github.com/opensearch-project/OpenSearch/pull/15080)) +- Fix array_index_out_of_bounds_exception when indexing documents with field name containing only dot ([#15126](https://github.com/opensearch-project/OpenSearch/pull/15126)) +- Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) ### Security diff --git a/MAINTAINERS.md b/MAINTAINERS.md index f77c69ddeff2a..7be19fbc877d7 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,7 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje ## Current Maintainers | Maintainer | GitHub ID | Affiliation | -| ------------------------ | ------------------------------------------------------- | ----------- | +|--------------------------|---------------------------------------------------------|-------------| | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | | Andriy Redko | [reta](https://github.com/reta) | Aiven | @@ -18,6 +18,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | | Jay Deng | [jed326](https://github.com/jed326) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | +| Varun Bansal | [linuxpi](https://github.com/linuxpi) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Amazon | diff --git a/TRIAGING.md b/TRIAGING.md index 53ef77de49159..dddcbc15394ab 100644 --- a/TRIAGING.md +++ b/TRIAGING.md @@ -31,8 +31,8 @@ Meeting structure may vary slightly, but the general structure is as follows: - [Search](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22) - [Indexing](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22%2C) - [Storage](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3AStorage%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22) - - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22) - - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) + - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22) + - 
[Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) 5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. 6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. 7. **Review of Old Untriaged Issues:** Look at all [untriaged issues older than 14 days](https://peternied.github.io/redirect/issue_search.html?owner=opensearch-project&repo=OpenSearch&tag=untriaged&created-since-days=14) to prevent issues from falling through the cracks. diff --git a/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt index f9f24fd1e2367..199e206450178 100644 --- a/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt @@ -17,6 +17,9 @@ java.nio.file.Paths @ Use org.opensearch.common.io.PathUtils.get() instead. java.nio.file.FileSystems#getDefault() @ use org.opensearch.common.io.PathUtils.getDefaultFileSystem() instead. +joptsimple.internal.Strings @ use org.opensearch.core.common.Strings instead. +org.apache.logging.log4j.util.Strings @ use org.opensearch.core.common.Strings instead. 
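[Editor's note, illustrative only and not part of this diff: the two signatures added above steer code away from joptsimple.internal.Strings and org.apache.logging.log4j.util.Strings toward org.opensearch.core.common.Strings. A minimal before/after sketch of a migrated call site, assuming the core Strings utility exposes hasText as it does in current OpenSearch core; the class and method names here are hypothetical:

    import org.opensearch.core.common.Strings;

    final class StringsMigrationSketch {
        // Before (now forbidden): org.apache.logging.log4j.util.Strings.isBlank(input)
        static String normalize(String input) {
            if (Strings.hasText(input) == false) { // core replacement for the log4j isBlank check
                return "";
            }
            return input.trim();
        }
    }
]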
+ java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.opensearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.opensearch.env.Environment.isWritable() instead, impacted by JDK-8034057 diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 3083ad4375460..a619ba1acf6a7 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -37,8 +37,8 @@ base { dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") - api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.5" + api "org.bouncycastle:bcpg-fips:2.0.8" + api "org.bouncycastle:bc-fips:2.0.0" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { @@ -58,33 +58,6 @@ test { jvmArgs += [ "-Djava.security.egd=file:/dev/urandom" ] } -/* - * these two classes intentionally use the following JDK internal APIs in order to offer the necessary - * functionality - * - * sun.security.internal.spec.TlsKeyMaterialParameterSpec - * sun.security.internal.spec.TlsKeyMaterialSpec - * sun.security.internal.spec.TlsMasterSecretParameterSpec - * sun.security.internal.spec.TlsPrfParameterSpec - * sun.security.internal.spec.TlsRsaPremasterSecretParameterSpec - * sun.security.provider.SecureRandom - * - */ -thirdPartyAudit.ignoreViolations( - 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider$CoreSecureRandom', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$BaseTLSKeyGeneratorSpi', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSPRFKeyGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator$2' -) - thirdPartyAudit.ignoreMissingClasses( 'org.brotli.dec.BrotliInputStream', 'org.objectweb.asm.AnnotationVisitor', diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 deleted file mode 100644 index 1b44c77dd4ee1..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -704e65f7e4fe679e5ab2aa8a840f27f8ced4c522 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..79f0e3e9930bb --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 @@ -0,0 +1 @@ +ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 deleted file mode 100644 index 44cebc7c92d87..0000000000000 --- 
a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e1952428655ea822066f86df2e3ecda8fa0ba2b \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 new file mode 100644 index 0000000000000..758ee2fdf9de6 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 @@ -0,0 +1 @@ +51c2f633e0c32d10de1ebab4c86f93310ff820f8 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index c2d8ce9be29dd..28cb989185ff7 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -107,6 +107,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_15_1 = new Version(2150199, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_1); + public static final Version V_2_16_1 = new Version(2160199, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_17_0 = new Version(2170099, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java index 4efaacecd0e67..4605aa684db1c 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java @@ -375,7 +375,7 @@ private static void skipToListStart(XContentParser parser) throws IOException { } } - // read a list without bounds checks, assuming the the current parser is always on an array start + // read a list without bounds checks, assuming the current parser is always on an array start private static List<Object> readListUnsafe(XContentParser parser, Supplier<Map<String, Object>> mapFactory) throws IOException { assert parser.currentToken() == Token.START_ARRAY; ArrayList<Object> list = new ArrayList<>(); diff --git a/libs/task-commons/build.gradle b/libs/task-commons/build.gradle new file mode 100644 index 0000000000000..dde3acd8effcf --- /dev/null +++ b/libs/task-commons/build.gradle @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +dependencies { + api project(':libs:opensearch-common') + + testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testImplementation "junit:junit:${versions.junit}" + testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" + testImplementation(project(":test:framework")) { + exclude group: 'org.opensearch', module: 'opensearch-task-commons' + } +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java new file mode 100644 index 0000000000000..625e15dfb3b6d --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.TaskStatus; +import org.opensearch.task.commons.task.TaskType; +import org.opensearch.task.commons.worker.WorkerNode; + +/** + * Request object for listing tasks + */ +public class TaskListRequest { + + /** + * Filters listTasks response by specific task statuses + */ + private TaskStatus[] taskStatus; + + /** + * Filter listTasks response by specific task types + */ + private TaskType[] taskTypes; + + /** + * Filter listTasks response by specific worker node + */ + private WorkerNode workerNode; + + /** + * Depicts the start page number for the list call. + * + * @see TaskManagerClient#listTasks(TaskListRequest) + */ + private int startPageNumber; + + /** + * Depicts the page size for the list call. + * + * @see TaskManagerClient#listTasks(TaskListRequest) + */ + private int pageSize; + + /** + * Default constructor + */ + public TaskListRequest() {} + + /** + * Update task types to filter by in the request + * @param taskTypes TaskType[] + * @return TaskListRequest + */ + public TaskListRequest taskType(TaskType... taskTypes) { + this.taskTypes = taskTypes; + return this; + } + + /** + * Update task statuses to filter by in the request + * @param taskStatus TaskStatus[] + * @return TaskListRequest + */ + public TaskListRequest taskStatus(TaskStatus...
taskStatus) { + this.taskStatus = taskStatus; + return this; + } + + /** + * Update worker node to filter by in the request + * @param workerNode WorkerNode + * @return TaskListRequest + */ + public TaskListRequest workerNode(WorkerNode workerNode) { + this.workerNode = workerNode; + return this; + } + + /** + * Update page number to start with when fetching the list of tasks + * @param startPageNumber startPageNumber + * @return TaskListRequest + */ + public TaskListRequest startPageNumber(int startPageNumber) { + this.startPageNumber = startPageNumber; + return this; + } + + /** + * Update page size for the list tasks response + * @param pageSize int + * @return TaskListRequest + */ + public TaskListRequest pageSize(int pageSize) { + this.pageSize = pageSize; + return this; + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java new file mode 100644 index 0000000000000..23ad8eabdc365 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.task.Task; +import org.opensearch.task.commons.task.TaskId; +import org.opensearch.task.commons.worker.WorkerNode; + +import java.util.List; + +/** + * Client used to interact with Task Store/Queue. + * + * TODO: TaskManager can be something not running in an OpenSearch process. + * We need to come up with a way to allow this interface to be used both within and outside an OpenSearch process. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TaskManagerClient { + + /** + * Get task from TaskStore/Queue + * + * @param taskId TaskId of the task to be retrieved + * @return Task corresponding to TaskId + */ + Task getTask(TaskId taskId); + + /** + * Update task in TaskStore/Queue + * + * @param task Task to be updated + */ + void updateTask(Task task); + + /** + * Mark task as cancelled. + * Ongoing Tasks can be cancelled as well if the corresponding worker supports cancellation + * + * @param taskId TaskId of the task to be cancelled + */ + void cancelTask(TaskId taskId); + + /** + * List all tasks applying all the filters present in taskListRequest + * + * @param taskListRequest TaskListRequest + * @return list of all the tasks matching the filters in taskListRequest + */ + List<Task> listTasks(TaskListRequest taskListRequest); + + /** + * Assign Task to a particular WorkerNode. This ensures no two worker nodes work on the same task. + * This API can be used in both pull and push models of task assignment.
+ * + * @param taskId TaskId of the task to be assigned + * @param node WorkerNode task is being assigned to + * @return true if task is assigned successfully, false otherwise + */ + boolean assignTask(TaskId taskId, WorkerNode node); +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java new file mode 100644 index 0000000000000..18f3421f9aa97 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.Task; + +/** + * Producer interface used to submit new tasks for execution on worker nodes. + */ +public interface TaskProducerClient { + + /** + * Submit a new task to TaskStore/Queue + * + * @param task Task to be submitted for execution on offline nodes + */ + void submitTask(Task task); +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java new file mode 100644 index 0000000000000..59abe189434dd --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.Task; +import org.opensearch.task.commons.task.TaskId; + +import java.util.List; + +/** + * Consumer interface used to find new tasks assigned to a {@code WorkerNode} for execution. + */ +public interface TaskWorkerClient { + + /** + * List all tasks assigned to a WorkerNode. + * Useful when the implementation uses a separate store for Task assignments to Worker nodes + * + * @param taskListRequest TaskListRequest + * @return list of all tasks assigned to a WorkerNode + */ + List<Task> getAssignedTasks(TaskListRequest taskListRequest); + + /** + * Sends task heartbeat to Task Store/Queue + * + * @param taskId TaskId of Task to send heartbeat for + * @param timestamp timestamp of heartbeat to be recorded in TaskStore/Queue + */ + void sendTaskHeartbeat(TaskId taskId, long timestamp); + +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java new file mode 100644 index 0000000000000..1329f3888248c --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** + * Contains task client related classes + */ +package org.opensearch.task.commons.clients; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java new file mode 100644 index 0000000000000..4cb773ace62ce --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains offline tasks related classes + */ +package org.opensearch.task.commons; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java new file mode 100644 index 0000000000000..7ad567b57bd42 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java @@ -0,0 +1,361 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.worker.WorkerNode; + +/** + * A Background Task to be run on Offline Node. + */ +@ExperimentalApi +public class Task { + + /** + * Task identifier used to uniquely identify a Task + */ + private final TaskId taskId; + + /** + * Depicts latest state of the Task + */ + private final TaskStatus taskStatus; + + /** + * Various params used for Task execution + */ + private final TaskParams params; + + /** + * Type/Category of the Task + */ + private final TaskType taskType; + + /** + * Worker Node on which the Task is to be executed + */ + private final WorkerNode assignedNode; + + /** + * Timestamp at which the Task was created + */ + private final long createdAt; + + /** + * Timestamp at which the Task was assigned to a worker + */ + private final long assignedAt; + + /** + * Timestamp at which the Task started execution on the worker + */ + private final long startedAt; + + /** + * Timestamp at which the Task was either completed/failed/cancelled + */ + private final long completedAt; + + /** + * Timestamp at which last heartbeat was sent by the worker + */ + private final long lastHeartbeatAt; + + /** + * Constructor for Task + * + * @param taskId Task identifier + * @param taskStatus Task status + * @param params Task Params + * @param taskType Task Type + * @param createdAt Timestamp at which the Task was created + * @param assignedAt Timestamp at which the Task was assigned to a worker + * @param startedAt Timestamp at which the Task started execution on the worker + * @param completedAt Timestamp at which the Task was either completed/failed/cancelled + * @param lastHeartbeatAt Timestamp at which last heartbeat was sent by the worker + * @param assignedNode Worker Node on which the Task is to be executed + */ + public Task( + TaskId taskId, + TaskStatus taskStatus, + TaskParams params, + TaskType taskType, + long createdAt, + @Nullable long assignedAt, + @Nullable long startedAt, + @Nullable long completedAt, + @Nullable long lastHeartbeatAt, + @Nullable WorkerNode assignedNode + ) { + this.taskId = taskId; + this.taskStatus =
taskStatus; + this.params = params; + this.taskType = taskType; + this.createdAt = createdAt; + this.assignedAt = assignedAt; + this.startedAt = startedAt; + this.completedAt = completedAt; + this.lastHeartbeatAt = lastHeartbeatAt; + this.assignedNode = assignedNode; + } + + /** + * Get TaskId + * @return TaskId + */ + public TaskId getTaskId() { + return taskId; + } + + /** + * Get TaskStatus + * @return TaskStatus + */ + public TaskStatus getTaskStatus() { + return taskStatus; + } + + /** + * Get TaskParams + * @return TaskParams + */ + public TaskParams getParams() { + return params; + } + + /** + * Get TaskType + * @return TaskType + */ + public TaskType getTaskType() { + return taskType; + } + + /** + * Get Task Creation Time + * @return createdAt + */ + public long getCreatedAt() { + return createdAt; + } + + /** + * Get Task Assignment Time + * @return assignedAt + */ + public long getAssignedAt() { + return assignedAt; + } + + /** + * Get Task Start Time + * @return startedAt + */ + public long getStartedAt() { + return startedAt; + } + + /** + * Get Task Completion Time + * @return completedAt + */ + public long getCompletedAt() { + return completedAt; + } + + /** + * Get Last Heartbeat Time + * @return lastHeartbeatAt + */ + public long getLastHeartbeatAt() { + return lastHeartbeatAt; + } + + /** + * Get Task Assigned Node + * @return assignedNode + */ + public WorkerNode getAssignedNode() { + return assignedNode; + } + + /** + * Builder class for Task. + */ + public static class Builder { + /** + * Task identifier used to uniquely identify a Task + */ + private final TaskId taskId; + + /** + * Depicts latest state of the Task + */ + private TaskStatus taskStatus; + + /** + * Various params used for Task execution + */ + private final TaskParams params; + + /** + * Type/Category of the Task + */ + private final TaskType taskType; + + /** + * Worker Node on which the Task is to be executed + */ + private WorkerNode assignedNode; + + /** + * Timestamp at which the Task was created + */ + private final long createdAt; + + /** + * Timestamp at which the Task was assigned to a worker + */ + private long assignedAt; + + /** + * Timestamp at which the Task started execution on the worker + */ + private long startedAt; + + /** + * Timestamp at which the Task was either completed/failed/cancelled + */ + private long completedAt; + + /** + * Timestamp at which last heartbeat was sent by the worker + */ + private long lastHeartbeatAt; + + /** + * Constructor for Task Builder + * + * @param taskId Task identifier + * @param taskStatus Task status + * @param params Task Params + * @param taskType Task Type + * @param createdAt Task Creation Time + */ + private Builder(TaskId taskId, TaskStatus taskStatus, TaskParams params, TaskType taskType, long createdAt) { + this.taskId = taskId; + this.taskStatus = taskStatus; + this.params = params; + this.taskType = taskType; + this.createdAt = createdAt; + } + + /** + * Build Builder from Task + * @param task Task to build from + * @return Task.Builder + */ + public static Builder builder(Task task) { + Builder builder = new Builder( + task.getTaskId(), + task.getTaskStatus(), + task.getParams(), + task.getTaskType(), + task.getCreatedAt() + ); + builder.assignedAt(task.getAssignedAt()); + builder.startedAt(task.getStartedAt()); + builder.completedAt(task.getCompletedAt()); + builder.lastHeartbeatAt(task.getLastHeartbeatAt()); + builder.assignedNode(task.getAssignedNode()); + return builder; + } + + /** + * Build Builder from various Task attributes + * @param taskId
Task identifier + * @param taskStatus TaskStatus + * @param params TaskParams + * @param taskType TaskType + * @param createdAt Task Creation Time + * @return Task.Builder + */ + public static Builder builder(TaskId taskId, TaskStatus taskStatus, TaskParams params, TaskType taskType, long createdAt) { + return new Builder(taskId, taskStatus, params, taskType, createdAt); + } + + /** + * Set Task Assignment Time + * @param assignedAt Timestamp at which the Task was assigned to a worker + */ + public void assignedAt(long assignedAt) { + this.assignedAt = assignedAt; + } + + /** + * Set Task Start Time + * @param startedAt Timestamp at which the Task started execution on the worker + */ + public void startedAt(long startedAt) { + this.startedAt = startedAt; + } + + /** + * Set Task Completion Time + * @param completedAt Timestamp at which the Task was either completed/failed/cancelled + */ + public void completedAt(long completedAt) { + this.completedAt = completedAt; + } + + /** + * Set Task Last Heartbeat Time for the task + * @param lastHeartbeatAt Timestamp at which last heartbeat was sent by the worker + */ + public void lastHeartbeatAt(long lastHeartbeatAt) { + this.lastHeartbeatAt = lastHeartbeatAt; + } + + /** + * Update the Task Status + * @param taskStatus {@link TaskStatus} - current status of the Task + */ + public void taskStatus(TaskStatus taskStatus) { + this.taskStatus = taskStatus; + } + + /** + * Set Task Assigned Node + * @param node Worker Node on which the Task is to be executed + */ + public void assignedNode(WorkerNode node) { + this.assignedNode = node; + } + + /** + * Build Task from Builder + * @return Task + */ + public Task build() { + return new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + } + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java new file mode 100644 index 0000000000000..7fb7e1536dc17 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Objects; + +/** + * Class encapsulating Task identifier + */ +@ExperimentalApi +public class TaskId { + + /** + * Identifier of the Task + */ + private final String id; + + /** + * Constructor to initialize TaskId + * @param id String value of Task id + */ + public TaskId(String id) { + this.id = id; + } + + /** + * Get id value + * @return id + */ + public String getValue() { + return id; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + TaskId other = (TaskId) obj; + return this.id.equals(other.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java new file mode 100644 index 0000000000000..ac6126a02cf24 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Base class for all TaskParams implementations of various TaskTypes + */ +@ExperimentalApi +public abstract class TaskParams { + + /** + * Default constructor + */ + public TaskParams() {} +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java new file mode 100644 index 0000000000000..e44e5c7fee620 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Task status enum + */ +@ExperimentalApi +public enum TaskStatus { + + /** + * TaskStatus of a Task which is not yet assigned to or picked up by a worker + */ + UNASSIGNED, + + /** + * TaskStatus of a Task which is assigned to or picked up by a worker but hasn't started execution yet. + * This status confirms that a worker will execute this task and no other worker should pick it up.
+ */ + ASSIGNED, + + /** + * TaskStatus of an in progress Task + */ + ACTIVE, + + /** + * TaskStatus of a finished Task + */ + SUCCESS, + + /** + * TaskStatus of a Task which failed in 1 or more attempts + */ + FAILED, + + /** + * TaskStatus of a cancelled Task + */ + CANCELLED +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java new file mode 100644 index 0000000000000..ef267a86e3da4 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Enum for task type + */ +@ExperimentalApi +public enum TaskType { + /** + * For all segment merge related tasks + */ + MERGE, + + /** + * For all snapshot related tasks + */ + SNAPSHOT +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java new file mode 100644 index 0000000000000..e99e4079cc7af --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains tasks related classes + */ +package org.opensearch.task.commons.task; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java new file mode 100644 index 0000000000000..6d9fd1fed23e4 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.worker; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.task.Task; + +/** + * Task Worker that executes the Task + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TaskWorker { + + /** + * Execute the Task + * + * @param task Task to be executed + */ + void executeTask(Task task); + +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java new file mode 100644 index 0000000000000..1b9426d472184 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.worker; + +import java.util.Objects; + +/** + * Represents a worker node in the fleet + */ +public class WorkerNode { + /** + * The unique identifier of the worker node. 
+ */ + private final String id; + + /** + * The name of the worker node. + */ + private final String name; + + /** + * The IP address of the worker node. + */ + private final String ip; + + /** + * Creates a new worker node with the given ID, name, and IP address. + * + * @param id The unique identifier of the worker node. + * @param name The name of the worker node. + * @param ip The IP address of the worker node. + */ + private WorkerNode(String id, String name, String ip) { + this.id = id; + this.name = name; + this.ip = ip; + } + + /** + * Creates a new worker node with the given ID, name, and IP address. + * + * @param id The unique identifier of the worker node. + * @param name The name of the worker node. + * @param ip The IP address of the worker node. + * @return The created worker node. + */ + public static WorkerNode createWorkerNode(String id, String name, String ip) { + return new WorkerNode(id, name, ip); + } + + /** + * Returns the unique identifier of the worker node. + * + * @return The ID of the worker node. + */ + public String getId() { + return id; + } + + /** + * Returns the name of the worker node. + * + * @return The name of the worker node. + */ + public String getName() { + return name; + } + + /** + * Returns the IP address of the worker node. + * + * @return The IP address of the worker node. + */ + public String getIp() { + return ip; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WorkerNode that = (WorkerNode) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(ip, that.ip); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ip); + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java new file mode 100644 index 0000000000000..d74fa30e8b661 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains task worker related classes + */ +package org.opensearch.task.commons.worker; diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java new file mode 100644 index 0000000000000..21d2a3dd725b6 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.mocks; + +import org.opensearch.task.commons.task.TaskParams; + +public class MockTaskParams extends TaskParams { + + private final String value; + + public MockTaskParams(String mockValue) { + super(); + value = mockValue; + } + + public String getValue() { + return value; + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java new file mode 100644 index 0000000000000..e58382e9fc056 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link TaskId} + */ +public class TaskIdTests extends OpenSearchTestCase { + + public void testConstructorAndGetValue() { + TaskId taskId = new TaskId("123"); + assertEquals("123", taskId.getValue()); + } + + public void testEqualsWithSameId() { + TaskId taskId1 = new TaskId("456"); + TaskId taskId2 = new TaskId("456"); + assertEquals(taskId1, taskId2); + } + + public void testEqualsWithDifferentId() { + TaskId taskId1 = new TaskId("789"); + TaskId taskId2 = new TaskId("987"); + assertNotEquals(taskId1, taskId2); + } + + public void testEqualsWithNull() { + TaskId taskId = new TaskId("abc"); + assertNotEquals(null, taskId); + } + + public void testEqualsWithDifferentClass() { + TaskId taskId = new TaskId("def"); + assertNotEquals(taskId, new Object()); + } + + public void testHashCode() { + TaskId taskId1 = new TaskId("456"); + TaskId taskId2 = new TaskId("456"); + assertEquals(taskId1.hashCode(), taskId2.hashCode()); + + TaskId taskId3 = new TaskId("4567"); + assertNotEquals(taskId1.hashCode(), taskId3.hashCode()); + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java new file mode 100644 index 0000000000000..37c5f543daa02 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.task; + +import org.opensearch.task.commons.mocks.MockTaskParams; +import org.opensearch.task.commons.worker.WorkerNode; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Test for {@link Task} + */ +public class TaskTests extends OpenSearchTestCase { + + public void testTaskConstructorAndGetters() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + long assignedAt = createdAt + 1000; + long startedAt = createdAt + 2000; + long completedAt = createdAt + 3000; + long lastHeartbeatAt = createdAt + 2500; + WorkerNode assignedNode = WorkerNode.createWorkerNode("node1", "nodename", "nodeip"); + + Task task = new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + + assertEquals(taskId, task.getTaskId()); + assertEquals(taskStatus, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(assignedAt, task.getAssignedAt()); + assertEquals(startedAt, task.getStartedAt()); + assertEquals(completedAt, task.getCompletedAt()); + assertEquals(lastHeartbeatAt, task.getLastHeartbeatAt()); + assertEquals(assignedNode, task.getAssignedNode()); + } + + public void testBuilderFromTask() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + long assignedAt = createdAt + 1000; + long startedAt = createdAt + 2000; + long completedAt = createdAt + 3000; + long lastHeartbeatAt = createdAt + 2500; + WorkerNode assignedNode = WorkerNode.createWorkerNode("node1", "nodename", "nodeip"); + + Task originalTask = new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + + Task.Builder builder = Task.Builder.builder(originalTask); + Task newTask = builder.build(); + + assertEquals(originalTask.getTaskId(), newTask.getTaskId()); + assertEquals(originalTask.getTaskStatus(), newTask.getTaskStatus()); + assertEquals(originalTask.getParams(), newTask.getParams()); + assertEquals(originalTask.getTaskType(), newTask.getTaskType()); + assertEquals(originalTask.getCreatedAt(), newTask.getCreatedAt()); + assertEquals(originalTask.getAssignedAt(), newTask.getAssignedAt()); + assertEquals(originalTask.getStartedAt(), newTask.getStartedAt()); + assertEquals(originalTask.getCompletedAt(), newTask.getCompletedAt()); + assertEquals(originalTask.getLastHeartbeatAt(), newTask.getLastHeartbeatAt()); + assertEquals(originalTask.getAssignedNode(), newTask.getAssignedNode()); + } + + public void testBuilderFromAttributes() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + + Task.Builder builder = Task.Builder.builder(taskId, taskStatus, params, taskType, createdAt); + builder.assignedAt(createdAt + 1000); + builder.startedAt(createdAt + 2000); + builder.completedAt(createdAt + 3000); + builder.lastHeartbeatAt(createdAt + 2500); + 
builder.assignedNode(WorkerNode.createWorkerNode("node1", "nodename", "nodeip")); + builder.taskStatus(TaskStatus.ACTIVE); + + Task task = builder.build(); + + assertEquals(taskId, task.getTaskId()); + assertEquals(TaskStatus.ACTIVE, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(createdAt + 1000, task.getAssignedAt()); + assertEquals(createdAt + 2000, task.getStartedAt()); + assertEquals(createdAt + 3000, task.getCompletedAt()); + assertEquals(createdAt + 2500, task.getLastHeartbeatAt()); + assertEquals(WorkerNode.createWorkerNode("node1", "nodename", "nodeip"), task.getAssignedNode()); + } + + public void testBuilderWithNullOptionalFields() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + + Task.Builder builder = Task.Builder.builder(taskId, taskStatus, params, taskType, createdAt); + Task task = builder.build(); + + assertEquals(taskId, task.getTaskId()); + assertEquals(taskStatus, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(0, task.getAssignedAt()); + assertEquals(0, task.getStartedAt()); + assertEquals(0, task.getCompletedAt()); + assertEquals(0, task.getLastHeartbeatAt()); + assertNull(task.getAssignedNode()); + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java new file mode 100644 index 0000000000000..07570bf947a27 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.task.commons.worker; + +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link WorkerNode} + */ +public class WorkerNodeTests extends OpenSearchTestCase { + + public void testCreateWorkerNode() { + WorkerNode worker = WorkerNode.createWorkerNode("1", "Worker1", "192.168.1.1"); + assertNotNull(worker); + assertEquals("1", worker.getId()); + assertEquals("Worker1", worker.getName()); + assertEquals("192.168.1.1", worker.getIp()); + } + + public void testEquality() { + WorkerNode worker1 = WorkerNode.createWorkerNode("5", "Worker5", "192.168.1.5"); + WorkerNode worker2 = WorkerNode.createWorkerNode("5", "Worker5", "192.168.1.5"); + WorkerNode worker3 = WorkerNode.createWorkerNode("6", "Worker6", "192.168.1.6"); + + assertEquals(worker1, worker2); + assertNotEquals(worker1, worker3); + assertNotEquals(worker2, worker3); + } + + public void testHashCode() { + WorkerNode worker1 = WorkerNode.createWorkerNode("7", "Worker7", "192.168.1.7"); + WorkerNode worker2 = WorkerNode.createWorkerNode("7", "Worker7", "192.168.1.7"); + + assertEquals(worker1.hashCode(), worker2.hashCode()); + } + + public void testNotEqualToNull() { + WorkerNode worker = WorkerNode.createWorkerNode("8", "Worker8", "192.168.1.8"); + assertNotEquals(null, worker); + } + + public void testNotEqualToDifferentClass() { + WorkerNode worker = WorkerNode.createWorkerNode("9", "Worker9", "192.168.1.9"); + assertNotEquals(worker, new Object()); + } +} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml index 47cc80d6df310..ecd56ea7f277e 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml @@ -178,7 +178,10 @@ teardown: - skip: version: " - 2.15.99" reason: "fixed in 2.16.0" + features: allowed_warnings - do: + allowed_warnings: + - "index template [test_for_bulk_upsert_index_template] has index patterns [test_bulk_upsert_*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_for_bulk_upsert_index_template] will take precedence during new index creation" indices.put_index_template: name: test_for_bulk_upsert_index_template body: diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 15e3158f2dbc4..6b63311cb3125 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,9 +44,9 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.49.1' + api 'com.azure:azure-core:1.51.0' api 'com.azure:azure-json:1.1.0' - api 'com.azure:azure-xml:1.0.0' + api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.25.1' api 'com.azure:azure-core-http-netty:1.15.1' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 deleted file mode 100644 index d487c08c26e94..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7c44282eaa0f5a3be4b920d6a057509adfe8674 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 new file mode 100644 index 0000000000000..7200f59af2f9a --- 
/dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 @@ -0,0 +1 @@ +ff5d0aedf75ca45ec0ace24673f790d2f7a57096 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 deleted file mode 100644 index 798ec5d95c6ac..0000000000000 --- a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba584703bd47e9e789343ee3332f0f5a64f7f187 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 new file mode 100644 index 0000000000000..4f9cfcac02f6e --- /dev/null +++ b/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 @@ -0,0 +1 @@ +8218a00c07f9f66d5dc7ae2ba613da6890867497 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 71ffd0fd959f1..970388498ee26 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -157,7 +157,7 @@ private BlobContainer createBlobContainer(final int maxRetries) { + "/"; clientSettings.put(ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); - clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(2000)); + clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(5000)); final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(ACCOUNT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "account"); @@ -171,7 +171,7 @@ RequestRetryOptions createRetryPolicy(final AzureStorageSettings azureStorageSet return new RequestRetryOptions( RetryPolicyType.EXPONENTIAL, azureStorageSettings.getMaxRetries(), - 1, + 5, 10L, 100L, secondaryHost diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index f117bae658abe..e1e5f422f3e07 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -76,7 +76,7 @@ dependencies { api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" - api 'org.apache.commons:commons-lang3:3.15.0' + api 'org.apache.commons:commons-lang3:3.16.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 deleted file mode 100644 index 4b1179c935946..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21581109b4be710ea4b195d5760392ec284f9f11 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 new file mode 100644 index 0000000000000..ef4f1c1fc2002 --- 
/dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 @@ -0,0 +1 @@ +3eb54effe40946dfb06dc5cd6c7ce4116cd51ea4 \ No newline at end of file diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle new file mode 100644 index 0000000000000..cb14d22ef149f --- /dev/null +++ b/plugins/workload-management/build.gradle @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description 'OpenSearch Workload Management Plugin.' + classname 'org.opensearch.plugin.wlm.WorkloadManagementPlugin' +} + +dependencies { +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java new file mode 100644 index 0000000000000..80807f0d5bc37 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.action.ActionRequest; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction; +import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.rest.RestController; +import org.opensearch.rest.RestHandler; + +import java.util.List; +import java.util.function.Supplier; + +/** + * Plugin class for WorkloadManagement + */ +public class WorkloadManagementPlugin extends Plugin implements ActionPlugin { + + /** + * Default constructor + */ + public WorkloadManagementPlugin() {} + + @Override + public List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return List.of(new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class)); + } + + @Override + public List<RestHandler> getRestHandlers( + Settings settings, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier<DiscoveryNodes> nodesInCluster + ) { + return List.of(new RestCreateQueryGroupAction()); + } + + @Override + public List<Setting<?>> getSettings() { + return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); + } +} diff --git 
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java new file mode 100644 index 0000000000000..14cb8cfcd125a --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionType; + +/** + * Transport action to create QueryGroup + * + * @opensearch.experimental + */ +public class CreateQueryGroupAction extends ActionType<CreateQueryGroupResponse> { + + /** + * An instance of CreateQueryGroupAction + */ + public static final CreateQueryGroupAction INSTANCE = new CreateQueryGroupAction(); + + /** + * Name for CreateQueryGroupAction + */ + public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_create"; + + /** + * Default constructor + */ + private CreateQueryGroupAction() { + super(NAME, CreateQueryGroupResponse::new); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java new file mode 100644 index 0000000000000..ff6422be36885 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.UUIDs; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContentParser; +import org.joda.time.Instant; + +import java.io.IOException; + +/** + * A request to create a QueryGroup + * User input schema: + * { + * "name": "analytics", + * "resiliency_mode": "enforced", + * "resource_limits": { + * "cpu" : 0.4, + * "memory" : 0.2 + * } + * } + * + * @opensearch.experimental + */ +public class CreateQueryGroupRequest extends ActionRequest { + private final QueryGroup queryGroup; + + /** + * Constructor for CreateQueryGroupRequest + * @param queryGroup - A {@link QueryGroup} object + */ + public CreateQueryGroupRequest(QueryGroup queryGroup) { + this.queryGroup = queryGroup; + } + + /** + * Constructor for CreateQueryGroupRequest + * @param in - A {@link StreamInput} object + */ + public CreateQueryGroupRequest(StreamInput in) throws IOException { + super(in); + queryGroup = new QueryGroup(in); + } + + /** + * Generate a CreateQueryGroupRequest from XContent + * @param parser - A {@link XContentParser} object + */ + public static CreateQueryGroupRequest fromXContent(XContentParser parser) throws IOException { + QueryGroup.Builder builder = QueryGroup.Builder.fromXContent(parser); + return new CreateQueryGroupRequest(builder._id(UUIDs.randomBase64UUID()).updatedAt(Instant.now().getMillis()).build()); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + queryGroup.writeTo(out); + } + + /** + * QueryGroup getter + */ + public QueryGroup getQueryGroup() { + return queryGroup; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java new file mode 100644 index 0000000000000..9a2a8178c0a29 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
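To make the request flow concrete, here is a hedged caller-side sketch that runs the JSON schema documented on `CreateQueryGroupRequest` through `fromXContent`. The parser plumbing (`JsonXContent`, `NamedXContentRegistry.EMPTY`, `DeprecationHandler.THROW_UNSUPPORTED_OPERATION`) is an assumption about standard OpenSearch XContent usage, not something this diff adds:

```java
import org.opensearch.common.xcontent.json.JsonXContent;
import org.opensearch.core.xcontent.DeprecationHandler;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContentParser;
import org.opensearch.plugin.wlm.action.CreateQueryGroupRequest;

public class CreateQueryGroupRequestExample {
    public static void main(String[] args) throws Exception {
        // The user-input schema from the class Javadoc above.
        String body = "{\"name\":\"analytics\",\"resiliency_mode\":\"enforced\","
            + "\"resource_limits\":{\"cpu\":0.4,\"memory\":0.2}}";
        try (
            XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                body
            )
        ) {
            // fromXContent assigns a random _id and the current time as updatedAt.
            CreateQueryGroupRequest request = CreateQueryGroupRequest.fromXContent(parser);
            System.out.println(request.getQueryGroup().getName()); // analytics
        }
    }
}
```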
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Response for the create API for QueryGroup + * + * @opensearch.experimental + */ +public class CreateQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { + private final QueryGroup queryGroup; + private final RestStatus restStatus; + + /** + * Constructor for CreateQueryGroupResponse + * @param queryGroup - The QueryGroup to be included in the response + * @param restStatus - The restStatus for the response + */ + public CreateQueryGroupResponse(final QueryGroup queryGroup, RestStatus restStatus) { + this.queryGroup = queryGroup; + this.restStatus = restStatus; + } + + /** + * Constructor for CreateQueryGroupResponse + * @param in - A {@link StreamInput} object + */ + public CreateQueryGroupResponse(StreamInput in) throws IOException { + queryGroup = new QueryGroup(in); + restStatus = RestStatus.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + queryGroup.writeTo(out); + RestStatus.writeTo(out, restStatus); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return queryGroup.toXContent(builder, params); + } + + /** + * queryGroup getter + */ + public QueryGroup getQueryGroup() { + return queryGroup; + } + + /** + * restStatus getter + */ + public RestStatus getRestStatus() { + return restStatus; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java new file mode 100644 index 0000000000000..01aa8cfb5e610 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +/** + * Transport action to create QueryGroup + * + * @opensearch.experimental + */ +public class TransportCreateQueryGroupAction extends HandledTransportAction<CreateQueryGroupRequest, CreateQueryGroupResponse> { + + private final ThreadPool threadPool; + private final QueryGroupPersistenceService queryGroupPersistenceService; + + /** + * Constructor for TransportCreateQueryGroupAction + * + * @param actionName - action name + * @param transportService - a {@link TransportService} object + * @param actionFilters - a {@link ActionFilters} object + * @param threadPool - a {@link ThreadPool} object + * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + */ + @Inject + public TransportCreateQueryGroupAction( + String actionName, + TransportService transportService, + ActionFilters actionFilters, + ThreadPool threadPool, + QueryGroupPersistenceService queryGroupPersistenceService + ) { + super(CreateQueryGroupAction.NAME, transportService, actionFilters, CreateQueryGroupRequest::new); + this.threadPool = threadPool; + this.queryGroupPersistenceService = queryGroupPersistenceService; + } + + @Override + protected void doExecute(Task task, CreateQueryGroupRequest request, ActionListener<CreateQueryGroupResponse> listener) { + threadPool.executor(ThreadPool.Names.SAME) + .execute(() -> queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener)); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java new file mode 100644 index 0000000000000..9921500df8a81 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package for the action classes of WorkloadManagementPlugin + */ +package org.opensearch.plugin.wlm.action; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java new file mode 100644 index 0000000000000..84c99967b226b --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** + * Base package for WorkloadManagementPlugin + */ +package org.opensearch.plugin.wlm; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java new file mode 100644 index 0000000000000..b0e0af4f9d17f --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.CreateQueryGroupRequest; +import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestResponseListener; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.POST; +import static org.opensearch.rest.RestRequest.Method.PUT; + +/** + * Rest action to create a QueryGroup + * + * @opensearch.experimental + */ +public class RestCreateQueryGroupAction extends BaseRestHandler { + + /** + * Constructor for RestCreateQueryGroupAction + */ + public RestCreateQueryGroupAction() {} + + @Override + public String getName() { + return "create_query_group"; + } + + /** + * The list of {@link Route}s that this RestHandler is responsible for handling. + */ + @Override + public List<Route> routes() { + return List.of(new Route(POST, "_wlm/query_group/"), new Route(PUT, "_wlm/query_group/")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + CreateQueryGroupRequest createQueryGroupRequest = CreateQueryGroupRequest.fromXContent(parser); + return channel -> client.execute(CreateQueryGroupAction.INSTANCE, createQueryGroupRequest, createQueryGroupResponse(channel)); + } + } + + private RestResponseListener<CreateQueryGroupResponse> createQueryGroupResponse(final RestChannel channel) { + return new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(final CreateQueryGroupResponse response) throws Exception { + return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); + } + }; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java new file mode 100644 index 0000000000000..7d7cb9028fdb8 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
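The REST handler above only parses the body and dispatches to the transport action, so the same create path can also be exercised programmatically. A hedged sketch, assuming an available `NodeClient`; the builder calls mirror the test utilities added later in this change, and the listener wiring is illustrative only:

```java
import java.util.Map;

import org.opensearch.client.node.NodeClient;
import org.opensearch.cluster.metadata.QueryGroup;
import org.opensearch.common.UUIDs;
import org.opensearch.core.action.ActionListener;
import org.opensearch.plugin.wlm.action.CreateQueryGroupAction;
import org.opensearch.plugin.wlm.action.CreateQueryGroupRequest;
import org.opensearch.search.ResourceType;

public class CreateQueryGroupClientExample {
    public static void create(NodeClient client) {
        QueryGroup group = QueryGroup.builder()
            .name("analytics")
            ._id(UUIDs.randomBase64UUID())
            .mode("enforced") // resiliency mode, as in the documented request schema
            .resourceLimits(Map.of(ResourceType.fromName("memory"), 0.2))
            .updatedAt(System.currentTimeMillis())
            .build();
        client.execute(
            CreateQueryGroupAction.INSTANCE,
            new CreateQueryGroupRequest(group),
            ActionListener.wrap(
                response -> System.out.println("created " + response.getQueryGroup().getName()),
                Exception::printStackTrace
            )
        );
    }
}
```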
+ */ + +/** + * Package for the rest classes of WorkloadManagementPlugin + */ +package org.opensearch.plugin.wlm.rest; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java new file mode 100644 index 0000000000000..b2164df561bf9 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java @@ -0,0 +1,201 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.service; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.search.ResourceType; + +import java.util.EnumMap; +import java.util.Map; +import java.util.Optional; + +/** + * This class defines the functions for QueryGroup persistence + */ +public class QueryGroupPersistenceService { + static final String SOURCE = "query-group-persistence-service"; + private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group"; + private static final Logger logger = LogManager.getLogger(QueryGroupPersistenceService.class); + /** + * max QueryGroup count setting name + */ + public static final String QUERY_GROUP_COUNT_SETTING_NAME = "node.query_group.max_count"; + /** + * default max queryGroup count on any node at any given point in time + */ + private static final int DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE = 100; + /** + * min queryGroup count on any node at any given point in time + */ + private static final int MIN_QUERY_GROUP_COUNT_VALUE = 1; + /** + * max QueryGroup count setting + */ + public static final Setting<Integer> MAX_QUERY_GROUP_COUNT = Setting.intSetting( + QUERY_GROUP_COUNT_SETTING_NAME, + DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE, + 0, + QueryGroupPersistenceService::validateMaxQueryGroupCount, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private final ClusterService clusterService; + private volatile int maxQueryGroupCount; + final ThrottlingKey createQueryGroupThrottlingKey; + + /** + * Constructor for QueryGroupPersistenceService + * + * @param clusterService {@link ClusterService} - The cluster service to be used by QueryGroupPersistenceService + * @param settings {@link Settings} - The settings to be used by QueryGroupPersistenceService + * @param clusterSettings {@link ClusterSettings} - The cluster settings to be used by QueryGroupPersistenceService + */ + @Inject + public QueryGroupPersistenceService( + final ClusterService clusterService, + final Settings settings, + final 
ClusterSettings clusterSettings + ) { + this.clusterService = clusterService; + this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); + setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); + clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); + } + + /** + * Set maxQueryGroupCount to be newMaxQueryGroupCount + * @param newMaxQueryGroupCount - the max number of QueryGroup allowed + */ + public void setMaxQueryGroupCount(int newMaxQueryGroupCount) { + validateMaxQueryGroupCount(newMaxQueryGroupCount); + this.maxQueryGroupCount = newMaxQueryGroupCount; + } + + /** + * Validator for maxQueryGroupCount + * @param maxQueryGroupCount - the maxQueryGroupCount number to be verified + */ + private static void validateMaxQueryGroupCount(int maxQueryGroupCount) { + if (maxQueryGroupCount > DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE || maxQueryGroupCount < MIN_QUERY_GROUP_COUNT_VALUE) { + throw new IllegalArgumentException(QUERY_GROUP_COUNT_SETTING_NAME + " should be in range [1-100]."); + } + } + + /** + * Update cluster state to include the new QueryGroup + * @param queryGroup {@link QueryGroup} - the QueryGroup we're currently creating + * @param listener - ActionListener for CreateQueryGroupResponse + */ + public void persistInClusterStateMetadata(QueryGroup queryGroup, ActionListener<CreateQueryGroupResponse> listener) { + clusterService.submitStateUpdateTask(SOURCE, new ClusterStateUpdateTask(Priority.NORMAL) { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return saveQueryGroupInClusterState(queryGroup, currentState); + } + + @Override + public ThrottlingKey getClusterManagerThrottlingKey() { + return createQueryGroupThrottlingKey; + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn("failed to save QueryGroup object due to error: {}, for source: {}.", e.getMessage(), source); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + CreateQueryGroupResponse response = new CreateQueryGroupResponse(queryGroup, RestStatus.OK); + listener.onResponse(response); + } + }); + } + + /** + * This method will be executed before we submit the new cluster state + * @param queryGroup - the QueryGroup we're currently creating + * @param currentClusterState - the cluster state before the update + */ + ClusterState saveQueryGroupInClusterState(final QueryGroup queryGroup, final ClusterState currentClusterState) { + final Map<String, QueryGroup> existingQueryGroups = currentClusterState.metadata().queryGroups(); + String groupName = queryGroup.getName(); + + // check if the maxQueryGroupCount limit would be breached + if (existingQueryGroups.size() == maxQueryGroupCount) { + logger.warn("{} value exceeded its assigned limit of {}.", QUERY_GROUP_COUNT_SETTING_NAME, maxQueryGroupCount); + throw new IllegalStateException("Can't create more than " + maxQueryGroupCount + " QueryGroups in the system."); + } + + // check for duplicate name + Optional<QueryGroup> findExistingGroup = existingQueryGroups.values() + .stream() + .filter(group -> group.getName().equals(groupName)) + .findFirst(); + if (findExistingGroup.isPresent()) { + logger.warn("QueryGroup with name {} already exists. Not creating a new one.", groupName); + throw new IllegalArgumentException("QueryGroup with name " + groupName + " already exists. 
Not creating a new one."); + } + + // check if any total resource allocation exceeds the limit of 1.0 + Map<ResourceType, Double> totalUsageMap = calculateTotalUsage(existingQueryGroups, queryGroup); + for (ResourceType resourceType : queryGroup.getResourceLimits().keySet()) { + if (totalUsageMap.get(resourceType) > 1) { + logger.warn("Total resource allocation for {} will go above the max limit of 1.0.", resourceType.getName()); + throw new IllegalArgumentException( + "Total resource allocation for " + resourceType.getName() + " will go above the max limit of 1.0." + ); + } + } + + return ClusterState.builder(currentClusterState) + .metadata(Metadata.builder(currentClusterState.metadata()).put(queryGroup).build()) + .build(); + } + + /** + * This method calculates the total usage of all the resource limits + * @param existingQueryGroups - existing QueryGroups in the system + * @param queryGroup - the QueryGroup we're creating or updating + */ + private Map<ResourceType, Double> calculateTotalUsage(Map<String, QueryGroup> existingQueryGroups, QueryGroup queryGroup) { + final Map<ResourceType, Double> map = new EnumMap<>(ResourceType.class); + map.putAll(queryGroup.getResourceLimits()); + for (QueryGroup currGroup : existingQueryGroups.values()) { + if (!currGroup.getName().equals(queryGroup.getName())) { + for (ResourceType resourceType : queryGroup.getResourceLimits().keySet()) { + map.compute(resourceType, (k, v) -> v + currGroup.getResourceLimits().get(resourceType)); + } + } + } + return map; + } + + /** + * maxQueryGroupCount getter + */ + public int getMaxQueryGroupCount() { + return maxQueryGroupCount; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java new file mode 100644 index 0000000000000..5848e9c936623 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package for the service classes of WorkloadManagementPlugin + */ +package org.opensearch.plugin.wlm.service; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java new file mode 100644 index 0000000000000..fc324853d9b34 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
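The allocation check in `saveQueryGroupInClusterState` above sums, per resource, the limits of all existing groups plus the candidate and rejects anything over 1.0. A self-contained worked example using the same numbers as the overflow test later in this change (0.6 already allocated, 0.41 requested); the local enum merely stands in for `org.opensearch.search.ResourceType`:

```java
import java.util.EnumMap;
import java.util.Map;

public class TotalUsageExample {
    enum ResourceType { CPU, MEMORY }

    public static void main(String[] args) {
        Map<ResourceType, Double> total = new EnumMap<>(ResourceType.class);
        total.put(ResourceType.MEMORY, 0.41);               // candidate group's limit
        total.merge(ResourceType.MEMORY, 0.6, Double::sum); // existing group's limit

        // 0.41 + 0.6 = 1.01 > 1.0, so the service would throw IllegalArgumentException.
        System.out.println(total.get(ResourceType.MEMORY) > 1.0); // true
    }
}
```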
+ */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.search.ResourceType.fromName; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +public class QueryGroupTestUtils { + public static final String NAME_ONE = "query_group_one"; + public static final String NAME_TWO = "query_group_two"; + public static final String _ID_ONE = "AgfUO5Ja9yfsYlONlYi3TQ=="; + public static final String _ID_TWO = "G5iIqHy4g7eK1qIAAAAIH53=1"; + public static final String NAME_NONE_EXISTED = "query_group_none_existed"; + public static final String MEMORY_STRING = "memory"; + public static final String MONITOR_STRING = "monitor"; + public static final long TIMESTAMP_ONE = 4513232413L; + public static final long TIMESTAMP_TWO = 4513232415L; + public static final QueryGroup queryGroupOne = builder().name(NAME_ONE) + ._id(_ID_ONE) + .mode(MONITOR_STRING) + .resourceLimits(Map.of(fromName(MEMORY_STRING), 0.3)) + .updatedAt(TIMESTAMP_ONE) + .build(); + + public static final QueryGroup queryGroupTwo = builder().name(NAME_TWO) + ._id(_ID_TWO) + .mode(MONITOR_STRING) + .resourceLimits(Map.of(fromName(MEMORY_STRING), 0.6)) + .updatedAt(TIMESTAMP_TWO) + .build(); + + public static List<QueryGroup> queryGroupList() { + List<QueryGroup> list = new ArrayList<>(); + list.add(queryGroupOne); + list.add(queryGroupTwo); + return list; + } + + public static ClusterState clusterState() { + final Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + return ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + } + + public static Set<Setting<?>> clusterSettingsSet() { + Set<Setting<?>> set = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + set.add(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); + assertFalse(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT)); + return set; + } + + public static Settings settings() { + return Settings.builder().build(); + } + + public static ClusterSettings clusterSettings() { + return new ClusterSettings(settings(), clusterSettingsSet()); + } + + public static QueryGroupPersistenceService queryGroupPersistenceService() { + ClusterApplierService clusterApplierService = new ClusterApplierService( + "name", + settings(), + clusterSettings(), + mock(ThreadPool.class) + ); + clusterApplierService.setInitialState(clusterState()); + ClusterService clusterService = new ClusterService( + settings(), + clusterSettings(), + mock(ClusterManagerService.class), + 
clusterApplierService + ); + return new QueryGroupPersistenceService(clusterService, settings(), clusterSettings()); + } + + public static Tuple<QueryGroupPersistenceService, ClusterState> preparePersistenceServiceSetup(Map<String, QueryGroup> queryGroups) { + Metadata metadata = Metadata.builder().queryGroups(queryGroups).build(); + Settings settings = Settings.builder().build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterApplierService clusterApplierService = new ClusterApplierService( + "name", + settings(), + clusterSettings(), + mock(ThreadPool.class) + ); + clusterApplierService.setInitialState(clusterState); + ClusterService clusterService = new ClusterService( + settings(), + clusterSettings(), + mock(ClusterManagerService.class), + clusterApplierService + ); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + return new Tuple<QueryGroupPersistenceService, ClusterState>(queryGroupPersistenceService, clusterState); + } + + public static void assertEqualQueryGroups(List<QueryGroup> listOne, List<QueryGroup> listTwo) { + assertEquals(listOne.size(), listTwo.size()); + listOne.sort(Comparator.comparing(QueryGroup::getName)); + listTwo.sort(Comparator.comparing(QueryGroup::getName)); + for (int i = 0; i < listOne.size(); i++) { + assertTrue(listOne.get(i).equals(listTwo.get(i))); + } + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java new file mode 100644 index 0000000000000..b0fa96a46df80 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; + +public class CreateQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of CreateQueryGroupRequest. 
+ */ + public void testSerialization() throws IOException { + CreateQueryGroupRequest request = new CreateQueryGroupRequest(queryGroupOne); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + CreateQueryGroupRequest otherRequest = new CreateQueryGroupRequest(streamInput); + List<QueryGroup> list1 = new ArrayList<>(); + List<QueryGroup> list2 = new ArrayList<>(); + list1.add(queryGroupOne); + list2.add(otherRequest.getQueryGroup()); + assertEqualQueryGroups(list1, list2); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java new file mode 100644 index 0000000000000..038f015713c5b --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class CreateQueryGroupResponseTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of CreateQueryGroupResponse. + */ + public void testSerialization() throws IOException { + CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + CreateQueryGroupResponse otherResponse = new CreateQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + QueryGroup responseGroup = response.getQueryGroup(); + QueryGroup otherResponseGroup = otherResponse.getQueryGroup(); + List<QueryGroup> listOne = new ArrayList<>(); + List<QueryGroup> listTwo = new ArrayList<>(); + listOne.add(responseGroup); + listTwo.add(otherResponseGroup); + QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Test case to verify the toXContent method of CreateQueryGroupResponse. 
+ */ + public void testToXContentCreateQueryGroup() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); + String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" + + " \"name\" : \"query_group_one\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232413,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.3\n" + + " }\n" + + "}"; + assertEquals(expected, actual); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java new file mode 100644 index 0000000000000..533c98b44685d --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -0,0 +1,247 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.service; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.search.ResourceType; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MEMORY_STRING; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettings; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettingsSet; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; +import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; +import static 
org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class QueryGroupPersistenceServiceTests extends OpenSearchTestCase { + + /** + * Test case to validate the creation logic of a QueryGroup + */ + public void testCreateQueryGroup() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(new HashMap<>()); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupOne, clusterState); + Map<String, QueryGroup> updatedGroupsMap = newClusterState.getMetadata().queryGroups(); + assertEquals(1, updatedGroupsMap.size()); + assertTrue(updatedGroupsMap.containsKey(_ID_ONE)); + List<QueryGroup> listOne = new ArrayList<>(); + List<QueryGroup> listTwo = new ArrayList<>(); + listOne.add(queryGroupOne); + listTwo.add(updatedGroupsMap.get(_ID_ONE)); + assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Test case to validate the logic for adding a new QueryGroup to a cluster state that already contains + * an existing QueryGroup + */ + public void testCreateAnotherQueryGroup() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupTwo, clusterState); + Map<String, QueryGroup> updatedGroups = newClusterState.getMetadata().queryGroups(); + assertEquals(2, updatedGroups.size()); + assertTrue(updatedGroups.containsKey(_ID_TWO)); + Collection<QueryGroup> values = updatedGroups.values(); + assertEqualQueryGroups(queryGroupList(), new ArrayList<>(values)); + } + + /** + * Test case to ensure the error is thrown when we try to create another QueryGroup with duplicate name + */ + public void testCreateQueryGroupDuplicateName() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + QueryGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.3)) + .updatedAt(1690934400000L) + .build(); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Test case to ensure the error is thrown when we try to create another QueryGroup that will make + * the total resource limits go above 1 + */ + public void testCreateQueryGroupOverflowAllocation() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(Map.of(_ID_TWO, queryGroupTwo)); + QueryGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.41)) + .updatedAt(1690934400000L) + .build(); + + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Test case to ensure the error 
is thrown when we already have the max allowed number of QueryGroups, but + * we want to create another one + */ + public void testCreateQueryGroupOverflowCount() { + QueryGroup toCreate = builder().name(NAME_NONE_EXISTED) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.5)) + .updatedAt(1690934400000L) + .build(); + Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + QueryGroupPersistenceService queryGroupPersistenceService1 = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Tests the invalid value of {@code node.query_group.max_count} + */ + public void testInvalidMaxQueryGroupCount() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(IllegalArgumentException.class, () -> queryGroupPersistenceService.setMaxQueryGroupCount(-1)); + } + + /** + * Tests the valid value of {@code node.query_group.max_count} + */ + public void testValidMaxSandboxCountSetting() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 100).build(); + ClusterService clusterService = new ClusterService(settings, clusterSettings(), mock(ThreadPool.class)); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings() + ); + queryGroupPersistenceService.setMaxQueryGroupCount(50); + assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); + } + + /** + * Tests PersistInClusterStateMetadata function + */ + public void testPersistInClusterStateMetadata() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); + } + + /** + * Tests PersistInClusterStateMetadata function with inner functions + */ + public void testPersistInClusterStateMetadataInner() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + ArgumentCaptor captor = 
ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), captor.capture()); + ClusterStateUpdateTask capturedTask = captor.getValue(); + assertEquals(queryGroupPersistenceService.createQueryGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); + + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + task.clusterStateProcessed(SOURCE, mock(ClusterState.class), mock(ClusterState.class)); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(listener).onResponse(any(CreateQueryGroupResponse.class)); + } + + /** + * Tests PersistInClusterStateMetadata function with failure + */ + public void testPersistInClusterStateMetadataFailure() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + Exception exception = new RuntimeException("Test Exception"); + task.onFailure(SOURCE, exception); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(listener).onFailure(any(RuntimeException.class)); + } +} diff --git a/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..9ec4a36ff6a5b --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
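Editor's note: the last three tests hinge on one Mockito idiom. Because persistInClusterStateMetadata hands a ClusterStateUpdateTask to ClusterService.submitStateUpdateTask and returns immediately, the test must drive the task's callbacks itself. A minimal, self-contained sketch of that idiom (the "test-source" string is illustrative; the call signatures are taken from the test code above):

```java
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateUpdateTask;
import org.opensearch.cluster.service.ClusterService;

final class ClusterUpdateMockingSketch {
    // Returns a mocked ClusterService whose submitStateUpdateTask runs the submitted
    // task's success callback inline, as if the cluster manager had applied the update.
    static ClusterService clusterServiceRunningTasksInline() {
        ClusterService clusterService = mock(ClusterService.class);
        doAnswer(invocation -> {
            ClusterStateUpdateTask task = invocation.getArgument(1);
            task.clusterStateProcessed("test-source", mock(ClusterState.class), mock(ClusterState.class));
            return null;
        }).when(clusterService).submitStateUpdateTask(anyString(), any());
        return clusterService;
    }
}
```

The failure path is driven the same way, calling task.onFailure(...) inside the doAnswer instead.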
diff --git a/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..9ec4a36ff6a5b --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.plugin.wlm; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; +import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; + +/** Runs yaml rest tests */ +public class WorkloadManagementClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase { + + public WorkloadManagementClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return OpenSearchClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json new file mode 100644 index 0000000000000..bb4620c01f2d6 --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json @@ -0,0 +1,18 @@ +{ + "create_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group", + "methods": ["PUT", "POST"], + "parts": {} + } + ] + }, + "params":{}, + "body":{ + "description":"The QueryGroup schema" + } + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_create_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_create_query_group.yml new file mode 100644 index 0000000000000..ae82a8146e9cd --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_create_query_group.yml @@ -0,0 +1,90 @@ +"test create QueryGroup API": + - skip: + version: " - 2.16.99" + reason: "QueryGroup WorkloadManagement feature was added in 2.17" + + - do: + create_query_group_context: + body: + { + "name": "analytics", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.4, + "memory": 0.2 + } + } + + - match: { name: "analytics" } + - match: { resiliency_mode: "monitor" } + - match: { resource_limits.cpu: 0.4 } + - match: { resource_limits.memory: 0.2 } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.4, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.61, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": -0.1, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.1, + "memory": 0.2 + } + } + + - do: + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.35, + "memory": 0.25 + } + } + + - match: { name: "analytics2" } + - match: { resiliency_mode: "monitor" } + - match: { resource_limits.cpu: 0.35 } + - match: { resource_limits.memory: 0.25 }
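Editor's note: the API spec and YAML test above define the new endpoint end to end (PUT or POST to /_wlm/query_group with a QueryGroup body). For completeness, a hedged sketch of the same call from Java using the low-level REST client; client construction is omitted and the body values are taken from the YAML test:

```java
import java.io.IOException;

import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

public final class CreateQueryGroupSketch {
    // Issues PUT /_wlm/query_group with the same body as the first YAML test case.
    static Response createAnalyticsGroup(RestClient client) throws IOException {
        Request request = new Request("PUT", "/_wlm/query_group");
        request.setJsonEntity(
            "{"
                + "\"name\": \"analytics\","
                + "\"resiliency_mode\": \"monitor\","
                + "\"resource_limits\": { \"cpu\": 0.4, \"memory\": 0.2 }"
                + "}"
        );
        return client.performRequest(request);
    }
}
```

A duplicate name, a total resource limit above 1, a non-positive limit, or an empty name would fail with illegal_argument_exception, as the YAML cases above assert.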
diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java index 5169ce18fff79..dd5248738569e 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java @@ -380,7 +380,7 @@ public static String escapePath(Path path) { } /** - * Recursively copy the the source directory to the target directory, preserving permissions. + * Recursively copy the source directory to the target directory, preserving permissions. */ public static void copyDirectory(Path source, Path target) throws IOException { Files.walkFileTree(source, new SimpleFileVisitor<Path>() { diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java index 42c7fd667fd8f..741660f972bfb 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java @@ -152,8 +152,8 @@ public void testDanglingIndicesCanBeImported() throws Exception { * 1, then create two indices and delete them both while one node in * the cluster is stopped. The deletion of the second pushes the deletion * of the first out of the graveyard. When the stopped node is resumed, - * only the second index will be found into the graveyard and the the - * other will be considered dangling, and can therefore be listed and + * only the second index will be found into the graveyard and the other + * will be considered dangling, and can therefore be listed and * deleted through the API */ public void testDanglingIndicesCanBeDeleted() throws Exception { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml similarity index 98% rename from rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml rename to rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml index 1c50187534026..e60981dbbf50c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml @@ -70,8 +70,8 @@ --- "Queries": - skip: - version: " - 2.99.99" - reason: "rangeQuery and regexpQuery are supported in 3.0.0 in main branch" + version: " - 2.16.99" + reason: "rangeQuery and regexpQuery are introduced in 2.17.0" - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml new file mode 100644 index 0000000000000..040e883b4a4c2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml @@ -0,0 +1,38 @@ +--- +"Index documents with field name containing only dot fail with an IllegalArgumentException": + - skip: + version: " - 2.16.99" + reason: "introduced in 2.17.0" + + - do: + indices.create: + index: test_1 + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + .: bar + } + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + ..: bar + } + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + foo: { + .: bar + } + } diff --git
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml index c8c08a2d088ac..6239eb7b8cd22 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml @@ -1,5 +1,9 @@ setup: + - skip: + features: allowed_warnings - do: + allowed_warnings: + - "index template [test_template_1] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_template_1] will take precedence during new index creation" indices.put_index_template: name: test_template_1 body: @@ -11,6 +15,8 @@ setup: "priority": 50 - do: + allowed_warnings: + - "index template [test_template_2] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_template_2] will take precedence during new index creation" indices.put_index_template: name: test_template_2 body: diff --git a/server/build.gradle b/server/build.gradle index 429af5d0ac258..5facc73dff968 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -68,6 +68,7 @@ dependencies { api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") + api project(":libs:opensearch-task-commons") compileOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java index dc3c8793a93f6..928c9e33e19cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -46,6 +46,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -69,12 +70,15 @@ import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -89,12 +93,32 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } @Override protected boolean forbidPrivateIndexSettings() { return false; } + @After + public void cleanUp() throws Exception { + // Delete is async. 
+ assertAcked( + client().admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).get() + ); + assertBusy(() -> { + try { + assertEquals(0, getFileCount(translogRepoPath)); + } catch (IOException e) { + fail(); + } + }, 30, TimeUnit.SECONDS); + super.teardown(); + } + public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java new file mode 100644 index 0000000000000..07d6e1379ced8 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -0,0 +1,155 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.client.Client; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public 
class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { + + private static String INDEX_NAME = "test-index"; + + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + String routingTableRepoName = "remote-routing-repo"; + String routingTableRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + routingTableRepoName + ); + String routingTableRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + routingTableRepoName + ); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) + .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) + .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .build(); + } + + public void testPublication() throws Exception { + // create cluster with multi node (3 master + 2 data) + prepareCluster(3, 2, INDEX_NAME, 1, 2); + ensureStableCluster(5); + ensureGreen(INDEX_NAME); + // update settings on a random node + assertAcked( + internalCluster().client() + .admin() + .cluster() + .updateSettings( + new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder().put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "10mb").build() + ) + ) + .actionGet() + ); + + RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME); + + Map<String, Integer> globalMetadataFiles = getMetadataFiles(repository, RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN); + + assertTrue(globalMetadataFiles.containsKey(COORDINATION_METADATA)); + assertTrue(globalMetadataFiles.containsKey(SETTING_METADATA)); + assertTrue(globalMetadataFiles.containsKey(TRANSIENT_SETTING_METADATA)); + assertTrue(globalMetadataFiles.containsKey(TEMPLATES_METADATA)); + assertTrue(globalMetadataFiles.keySet().stream().anyMatch(key -> key.startsWith(CUSTOM_METADATA))); + + Map<String, Integer> ephemeralMetadataFiles = getMetadataFiles( + repository, + RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN + ); + + assertTrue(ephemeralMetadataFiles.containsKey(CLUSTER_BLOCKS)); + assertTrue(ephemeralMetadataFiles.containsKey(DISCOVERY_NODES)); + + Map<String, Integer> manifestFiles = getMetadataFiles(repository, RemoteClusterMetadataManifest.MANIFEST); + assertTrue(manifestFiles.containsKey(RemoteClusterMetadataManifest.MANIFEST)); + + // get settings from each node and verify that it is updated + Settings settings = clusterService().getSettings(); + logger.info("settings : {}", settings); + for (Client client : clients()) { + ClusterStateResponse response = client.admin().cluster().prepareState().clear().setMetadata(true).get(); + String refreshSetting = response.getState() + .metadata() + .settings() + .get(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()); + assertEquals("10mb", refreshSetting); + } + } + + private Map<String, Integer> getMetadataFiles(BlobStoreRepository repository, String subDirectory)
throws IOException { + BlobPath metadataPath = repository.basePath() + .add( + Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add(RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN) + .add(getClusterState().metadata().clusterUUID()) + .add(subDirectory); + return repository.blobStore().blobContainer(metadataPath).listBlobs().keySet().stream().map(fileName -> { + logger.info(fileName); + return fileName.split(DELIMITER)[0]; + }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum)); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 8e5193b650868..1cabb8b617ce3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -275,7 +275,7 @@ public void testValidCompositeIndex() { assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( - StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP, + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode() ); assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); @@ -359,7 +359,7 @@ public void testUpdateIndexWhenMappingIsSame() { assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( - StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP, + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode() ); assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java index 8fd7961cab3a7..7bdf33edf1534 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java @@ -298,8 +298,8 @@ public void testMustAcceptDataLossToImportDanglingIndex() throws Exception { * 1, then create two indices and delete them both while one node in * the cluster is stopped. The deletion of the second pushes the deletion * of the first out of the graveyard. 
When the stopped node is resumed, - * only the second index will be found into the graveyard and the the - * other will be considered dangling, and can therefore be listed and + * only the second index will be found into the graveyard and the other + * will be considered dangling, and can therefore be listed and * deleted through the API */ public void testDanglingIndexCanBeDeleted() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 63a9451a27a12..f83ae3e0ca820 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -313,7 +313,7 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { } } - public static int getFileCount(Path path) throws Exception { + public static int getFileCount(Path path) throws IOException { final AtomicInteger filesExisting = new AtomicInteger(0); Files.walkFileTree(path, new SimpleFileVisitor<>() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 31c73e2fc03ae..86d586cd17146 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -250,7 +250,7 @@ public void testStatsResponseFromLocalNode() { } } - @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE") + @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.index.shard:TRACE") public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { setup(); // Scenario: @@ -280,11 +280,13 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce .get(0) .getSegmentStats(); logger.info( - "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.", + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed, {} uploads succeeded, {} upload bytes succeeded.", zeroStatePrimaryStats.refreshTimeLagMs, zeroStatePrimaryStats.bytesLag, zeroStatePrimaryStats.uploadBytesStarted, - zeroStatePrimaryStats.uploadBytesFailed + zeroStatePrimaryStats.uploadBytesFailed, + zeroStatePrimaryStats.totalUploadsSucceeded, + zeroStatePrimaryStats.uploadBytesSucceeded ); assertTrue( zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded @@ -348,7 +350,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } } - @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE") + @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.index.shard:TRACE") public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { setup(); // Scenario: @@ -382,11 +384,13 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr .get(0) .getSegmentStats(); logger.info( - "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes
started and {}b upload bytes failed.", + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed, {} uploads succeeded, {} upload bytes succeeded.", zeroStatePrimaryStats.refreshTimeLagMs, zeroStatePrimaryStats.bytesLag, zeroStatePrimaryStats.uploadBytesStarted, - zeroStatePrimaryStats.uploadBytesFailed + zeroStatePrimaryStats.uploadBytesFailed, + zeroStatePrimaryStats.totalUploadsSucceeded, + zeroStatePrimaryStats.uploadBytesSucceeded ); assertTrue( zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded @@ -617,7 +621,7 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce } assertZeroTranslogDownloadStats(translogStats); }); - }, 5, TimeUnit.SECONDS); + }, 10, TimeUnit.SECONDS); } public void testStatsCorrectnessOnFailover() { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 97ef94055faf7..d1d5f568fc09d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -27,6 +27,7 @@ @PublicApi(since = "2.11.0") public final class SearchRequestStats extends SearchRequestOperationsListener { Map<SearchPhaseName, StatsHolder> phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + StatsHolder tookStatsHolder; public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; public static final Setting<Boolean> SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( @@ -40,6 +41,7 @@ public final class SearchRequestStats extends SearchRequestOperationsListener { public SearchRequestStats(ClusterSettings clusterSettings) { this.setEnabled(clusterSettings.get(SEARCH_REQUEST_STATS_ENABLED)); clusterSettings.addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setEnabled); + tookStatsHolder = new StatsHolder(); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { phaseStatsMap.put(searchPhaseName, new StatsHolder()); } @@ -57,6 +59,18 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { return phaseStatsMap.get(searchPhaseName).timing.sum(); } + public long getTookCurrent() { + return tookStatsHolder.current.count(); + } + + public long getTookTotal() { + return tookStatsHolder.total.count(); + } + + public long getTookMetric() { + return tookStatsHolder.timing.sum(); + } + @Override protected void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); @@ -75,6 +89,23 @@ protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } + @Override + protected void onRequestStart(SearchRequestContext searchRequestContext) { + tookStatsHolder.current.inc(); + } + + @Override + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + tookStatsHolder.current.dec(); + tookStatsHolder.total.inc(); + tookStatsHolder.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos())); + } + + @Override + protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + tookStatsHolder.current.dec(); + } + /** * Holder of statistics values *
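Editor's note: the new took accounting mirrors the per-phase StatsHolder triple: `current` counts in-flight requests, `total` counts completed ones, and `timing` accumulates end-to-end wall time derived from the request's absolute start nanos. A small sketch of reading the new getters (the `stats` handle is illustrative; in a node it is the registered SearchRequestStats instance):

```java
import org.opensearch.action.search.SearchRequestStats;

final class TookStatsProbe {
    // stats: the node's SearchRequestStats (obtained via injection in real code).
    static void logTookStats(SearchRequestStats stats) {
        long inFlight = stats.getTookCurrent();        // requests currently executing
        long completed = stats.getTookTotal();         // requests completed so far
        long summedTookMillis = stats.getTookMetric(); // summed end-to-end latency, in ms
        System.out.printf("in-flight=%d completed=%d took-sum=%dms%n", inFlight, completed, summedTookMillis);
    }
}
```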
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 440b9e267cf0a..4da6c68b40733 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -853,6 +853,12 @@ public Map<String, View> views() { return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); } + public Map<String, QueryGroup> queryGroups() { + return Optional.ofNullable((QueryGroupMetadata) this.custom(QueryGroupMetadata.TYPE)) + .map(QueryGroupMetadata::queryGroups) + .orElse(Collections.emptyMap()); + } + public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } @@ -1391,7 +1397,7 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } - private Map<String, QueryGroup> getQueryGroups() { + public Map<String, QueryGroup> getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) .map(QueryGroupMetadata::queryGroups) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java index beaab198073df..6ab11b1d6f150 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java @@ -29,37 +29,42 @@ * Class to define the QueryGroup schema * { * "_id": "fafjafjkaf9ag8a9ga9g7ag0aagaga", - * "resourceLimits": { - * "jvm": 0.4 + * "resource_limits": { + * "memory": 0.4 * }, * "resiliency_mode": "enforced", * "name": "analytics", - * "updatedAt": 4513232415 + * "updated_at": 4513232415 * } */ @ExperimentalApi public class QueryGroup extends AbstractDiffable<QueryGroup> implements ToXContentObject { + public static final String _ID_STRING = "_id"; + public static final String NAME_STRING = "name"; + public static final String RESILIENCY_MODE_STRING = "resiliency_mode"; + public static final String UPDATED_AT_STRING = "updated_at"; + public static final String RESOURCE_LIMITS_STRING = "resource_limits"; private static final int MAX_CHARS_ALLOWED_IN_NAME = 50; private final String name; private final String _id; private final ResiliencyMode resiliencyMode; // It is an epoch in millis private final long updatedAtInMillis; - private final Map<ResourceType, Object> resourceLimits; + private final Map<ResourceType, Double> resourceLimits; - public QueryGroup(String name, ResiliencyMode resiliencyMode, Map<ResourceType, Object> resourceLimits) { + public QueryGroup(String name, ResiliencyMode resiliencyMode, Map<ResourceType, Double> resourceLimits) { this(name, UUIDs.randomBase64UUID(), resiliencyMode, resourceLimits, Instant.now().getMillis()); } - public QueryGroup(String name, String _id, ResiliencyMode resiliencyMode, Map<ResourceType, Object> resourceLimits, long updatedAt) { + public QueryGroup(String name, String _id, ResiliencyMode resiliencyMode, Map<ResourceType, Double> resourceLimits, long updatedAt) { Objects.requireNonNull(name, "QueryGroup.name can't be null"); Objects.requireNonNull(resourceLimits, "QueryGroup.resourceLimits can't be null"); Objects.requireNonNull(resiliencyMode, "QueryGroup.resiliencyMode can't be null"); Objects.requireNonNull(_id, "QueryGroup._id can't be null"); - if (name.length() > MAX_CHARS_ALLOWED_IN_NAME) { - throw new IllegalArgumentException("QueryGroup.name shouldn't be more than 50 chars long"); + if (name.length() > MAX_CHARS_ALLOWED_IN_NAME || name.isEmpty()) { + throw new IllegalArgumentException("QueryGroup.name shouldn't be empty or more than 50 chars long"); } if (resourceLimits.isEmpty()) { @@ -92,7 +97,7 @@ public QueryGroup(StreamInput in) throws IOException { in.readString(), in.readString(), ResiliencyMode.fromName(in.readString()), - in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readGenericValue), + in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readDouble), in.readLong() ); } @@ -102,18 +107,18 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(_id); out.writeString(resiliencyMode.getName()); - out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeGenericValue); + out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeDouble); out.writeLong(updatedAtInMillis); } - private void validateResourceLimits(Map<ResourceType, Object> resourceLimits) { - for (Map.Entry<ResourceType, Object> resource : resourceLimits.entrySet()) { - Double threshold = (Double) resource.getValue(); + private void validateResourceLimits(Map<ResourceType, Double> resourceLimits) { + for (Map.Entry<ResourceType, Double> resource : resourceLimits.entrySet()) { + Double threshold = resource.getValue(); Objects.requireNonNull(resource.getKey(), "resourceName can't be null"); Objects.requireNonNull(threshold, "resource limit threshold for" + resource.getKey().getName() + " : can't be null"); - if (Double.compare(threshold, 1.0) > 0) { - throw new IllegalArgumentException("resource value should be less than 1.0"); + if (Double.compare(threshold, 0.0) <= 0 || Double.compare(threshold, 1.0) > 0) { + throw new IllegalArgumentException("resource value should be greater than 0 and less than or equal to 1.0"); } } } @@ -121,12 +126,12 @@ private void validateResourceLimits(Map<ResourceType, Double> resourceLimits) { @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); - builder.field("_id", _id); - builder.field("name", name); - builder.field("resiliency_mode", resiliencyMode.getName()); - builder.field("updatedAt", updatedAtInMillis); + builder.field(_ID_STRING, _id); + builder.field(NAME_STRING, name); + builder.field(RESILIENCY_MODE_STRING, resiliencyMode.getName()); + builder.field(UPDATED_AT_STRING, updatedAtInMillis); // write resource limits - builder.startObject("resourceLimits"); + builder.startObject(RESOURCE_LIMITS_STRING); for (ResourceType resourceType : ResourceType.values()) { if (resourceLimits.containsKey(resourceType)) { builder.field(resourceType.getName(), resourceLimits.get(resourceType)); @@ -139,56 +144,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa } public static QueryGroup fromXContent(final XContentParser parser) throws IOException { - if (parser.currentToken() == null) { // fresh parser? move to the first token - parser.nextToken(); - } - - Builder builder = builder(); - - XContentParser.Token token = parser.currentToken(); - - if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]"); - } - - String fieldName = ""; - // Map to hold resources - final Map<ResourceType, Double> resourceLimits = new HashMap<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token.isValue()) { - if (fieldName.equals("_id")) { - builder._id(parser.text()); - } else if (fieldName.equals("name")) { - builder.name(parser.text()); - } else if (fieldName.equals("resiliency_mode")) { - builder.mode(parser.text()); - } else if (fieldName.equals("updatedAt")) { - builder.updatedAt(parser.longValue()); - } else { - throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup"); - } - } else if (token == XContentParser.Token.START_OBJECT) { - - if (!fieldName.equals("resourceLimits")) { - throw new IllegalArgumentException( - "QueryGroup.resourceLimits is an object and expected token was { " + " but found " + token - ); - } - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else { - resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue()); - } - } - - } - } - builder.resourceLimits(resourceLimits); - return builder.build(); + return Builder.fromXContent(parser).build(); } public static Diff<QueryGroup> readDiff(final StreamInput in) throws IOException { @@ -201,6 +157,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; QueryGroup that = (QueryGroup) o; return Objects.equals(name, that.name) + && Objects.equals(resiliencyMode, that.resiliencyMode) && Objects.equals(resourceLimits, that.resourceLimits) && Objects.equals(_id, that._id) && updatedAtInMillis == that.updatedAtInMillis; @@ -219,7 +176,7 @@ public ResiliencyMode getResiliencyMode() { return resiliencyMode; } - public Map<ResourceType, Object> getResourceLimits() { + public Map<ResourceType, Double> getResourceLimits() { return resourceLimits; } @@ -268,7 +225,6 @@ public static ResiliencyMode fromName(String s) { } throw new IllegalArgumentException("Invalid value for QueryGroupMode: " + s); } - } /** @@ -280,10 +236,62 @@ public static class Builder { private String _id; private ResiliencyMode resiliencyMode; private long updatedAt; - private Map<ResourceType, Object> resourceLimits; + private Map<ResourceType, Double> resourceLimits; private Builder() {} + public static Builder fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { // fresh parser? move to the first token + parser.nextToken(); + } + + Builder builder = builder(); + + XContentParser.Token token = parser.currentToken(); + + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]"); + } + + String fieldName = ""; + // Map to hold resources + final Map<ResourceType, Double> resourceLimits = new HashMap<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token.isValue()) { + if (fieldName.equals(_ID_STRING)) { + builder._id(parser.text()); + } else if (fieldName.equals(NAME_STRING)) { + builder.name(parser.text()); + } else if (fieldName.equals(RESILIENCY_MODE_STRING)) { + builder.mode(parser.text()); + } else if (fieldName.equals(UPDATED_AT_STRING)) { + builder.updatedAt(parser.longValue()); + } else { + throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup"); + } + } else if (token == XContentParser.Token.START_OBJECT) { + + if (!fieldName.equals(RESOURCE_LIMITS_STRING)) { + throw new IllegalArgumentException( + "QueryGroup.resourceLimits is an object and expected token was { " + " but found " + token + ); + } + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else { + resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue()); + } + } + + } + } + return builder.resourceLimits(resourceLimits); + } + public Builder name(String name) { this.name = name; return this; @@ -304,7 +312,7 @@ public Builder updatedAt(long updatedAt) { return this; } - public Builder resourceLimits(Map<ResourceType, Object> resourceLimits) { + public Builder resourceLimits(Map<ResourceType, Double> resourceLimits) { this.resourceLimits = resourceLimits; return this; } @@ -312,6 +320,5 @@ public Builder resourceLimits(Map<ResourceType, Double> resourceLimits) { public QueryGroup build() { return new QueryGroup(name, _id, resiliencyMode, resourceLimits, updatedAt); } - } }
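Editor's note: parsing now funnels through the Builder, so constructing a QueryGroup programmatically and parsing one from XContent share a single code path. A hedged sketch of direct construction via the Builder, following the pattern the unit tests above use (field values are illustrative; the ResourceType import path is an assumption):

```java
import java.util.Map;

import org.joda.time.Instant;
import org.opensearch.cluster.metadata.QueryGroup;
import org.opensearch.common.UUIDs;
import org.opensearch.wlm.ResourceType; // package assumed

public final class QueryGroupBuilderSketch {
    static QueryGroup analyticsGroup() {
        return QueryGroup.builder()
            .name("analytics")                                            // 1-50 chars, enforced above
            ._id(UUIDs.randomBase64UUID())
            .mode("monitor")                                              // resiliency_mode
            .resourceLimits(Map.of(ResourceType.fromName("memory"), 0.4)) // each limit in (0, 1.0]
            .updatedAt(Instant.now().getMillis())
            .build();
    }
}
```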
diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index 3c578a8c5c01f..0f1ff3138ef90 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -20,14 +20,15 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.gateway.remote.RemoteStateTransferException; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; @@ -262,12 +263,13 @@ protected void doStart() { clusterSettings ); - this.remoteRoutingTableDiffStore = new RemoteClusterStateBlobStore<>( + this.remoteRoutingTableDiffStore = new RemoteWriteableEntityBlobStore<>( new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), blobStoreRepository, clusterName, threadPool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ); } diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index ed324e4e62d8f..ee21c343e2ea1 100644 --- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -38,7 +38,7 @@ import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; -import org.apache.logging.log4j.util.Strings; +import org.opensearch.core.common.Strings; import java.nio.charset.Charset; import java.util.StringJoiner; @@ -84,7 +84,7 @@ public static JsonThrowablePatternConverter newInstance(final Configuration conf @Override public void format(final LogEvent event, final StringBuilder toAppendTo) { String consoleStacktrace = formatStacktrace(event); - if (Strings.isNotEmpty(consoleStacktrace)) { + if (!Strings.isNullOrEmpty(consoleStacktrace)) { String jsonStacktrace = formatJson(consoleStacktrace); toAppendTo.append(", "); diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java new file mode 100644 index 0000000000000..b9492da04680d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; + +/** + * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage + * + * @param <T> The class type which can be uploaded to or downloaded from a blob storage.
+ */ +public abstract class AbstractClusterMetadataWriteableBlobEntity<T> extends RemoteWriteableBlobEntity<T> { + + protected final NamedXContentRegistry namedXContentRegistry; + + public AbstractClusterMetadataWriteableBlobEntity( + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor); + this.namedXContentRegistry = namedXContentRegistry; + } + + public AbstractClusterMetadataWriteableBlobEntity(final String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor); + this.namedXContentRegistry = null; + } + + public abstract UploadedMetadata getUploadedMetadata(); + + public NamedXContentRegistry getNamedXContentRegistry() { + return namedXContentRegistry; + } +} diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java index dc301635c4a80..8e2de1580a49f 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java @@ -31,7 +31,7 @@ public abstract class AbstractRemoteWritableEntityManager implements RemoteWrita * @return the remote writable entity store for the given entity * @throws IllegalArgumentException if the entity type is unknown */ - protected RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { + protected RemoteWritableEntityStore getStore(AbstractClusterMetadataWriteableBlobEntity entity) { RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); if (remoteStore == null) { throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); @@ -49,7 +49,7 @@ protected RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity en */ protected abstract ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ); @@ -64,21 +64,21 @@ protected abstract ActionListener getWrappedWriteListener( */ protected abstract ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ); @Override public void writeAsync( String component, - AbstractRemoteWritableBlobEntity entity, + AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener ) { getStore(entity).writeAsync(entity, getWrappedWriteListener(component, entity, listener)); } @Override - public void readAsync(String component, AbstractRemoteWritableBlobEntity entity, ActionListener listener) { + public void readAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener) { getStore(entity).readAsync(entity, getWrappedReadListener(component, entity, listener)); } } diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java index 7693d1b5284bd..c27598e368e4d 100644 --- a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java @@ -29,7 +29,7 @@ public interface RemoteWritableEntityManager { * {@link
ActionListener#onFailure(Exception)} method is called with * an exception if the read operation fails. */ - void readAsync(String component, AbstractRemoteWritableBlobEntity entity, ActionListener listener); + void readAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener); /** * Performs an asynchronous write operation for the specified component and entity. @@ -43,5 +43,5 @@ public interface RemoteWritableEntityManager { * {@link ActionListener#onFailure(Exception)} method is called with * an exception if the write operation fails. */ - void writeAsync(String component, AbstractRemoteWritableBlobEntity entity, ActionListener listener); + void writeAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener); } diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java similarity index 62% rename from server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java rename to server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java index 237c077cb673c..f034ce2d1adf1 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java @@ -10,38 +10,25 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.compress.Compressor; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; /** - * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage - * - * @param <T> The class type which can be uploaded to or downloaded from a blob storage. + * The abstract class which represents a {@link RemoteWriteableEntity} that can be written to a store + * @param <T> the entity to be written */ -public abstract class AbstractRemoteWritableBlobEntity<T> implements RemoteWriteableEntity<T> { +public abstract class RemoteWriteableBlobEntity<T> implements RemoteWriteableEntity<T> { protected String blobFileName; protected String blobName; private final String clusterUUID; private final Compressor compressor; - private final NamedXContentRegistry namedXContentRegistry; private String[] pathTokens; - public AbstractRemoteWritableBlobEntity( - final String clusterUUID, - final Compressor compressor, - final NamedXContentRegistry namedXContentRegistry - ) { + public RemoteWriteableBlobEntity(final String clusterUUID, final Compressor compressor) { this.clusterUUID = clusterUUID; this.compressor = compressor; - this.namedXContentRegistry = namedXContentRegistry; - } - - public AbstractRemoteWritableBlobEntity(final String clusterUUID, final Compressor compressor) { - this(clusterUUID, compressor, null); } public abstract BlobPathParameters getBlobPathParameters(); @@ -80,16 +67,10 @@ public String clusterUUID() { return clusterUUID; } - public abstract UploadedMetadata getUploadedMetadata(); - public void setFullBlobName(BlobPath blobPath) { this.blobName = blobPath.buildAsString() + blobFileName; } - public NamedXContentRegistry getNamedXContentRegistry() { - return namedXContentRegistry; - } - protected Compressor getCompressor() { return compressor; } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java similarity index 78% rename from server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java rename to server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java index cd8b8aa41ad65..baa44a7c9bde9 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java @@ -6,49 +6,48 @@ * compatible open source license. */ -package org.opensearch.gateway.remote.model; +package org.opensearch.common.remote; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; -import org.opensearch.common.remote.RemoteWritableEntityStore; -import org.opensearch.common.remote.RemoteWriteableEntity; import org.opensearch.core.action.ActionListener; -import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.concurrent.ExecutorService; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; - /** * Abstract class for a blob type storage * * @param <T> The entity which can be uploaded to / downloaded from blob store * @param <U> The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for T entity.
*/ -public class RemoteClusterStateBlobStore> implements RemoteWritableEntityStore { +public class RemoteWriteableEntityBlobStore> implements RemoteWritableEntityStore { private final BlobStoreTransferService transferService; private final BlobStoreRepository blobStoreRepository; private final String clusterName; private final ExecutorService executorService; + private final String pathToken; - public RemoteClusterStateBlobStore( + public RemoteWriteableEntityBlobStore( final BlobStoreTransferService blobStoreTransferService, final BlobStoreRepository blobStoreRepository, final String clusterName, final ThreadPool threadPool, - final String executor + final String executor, + final String pathToken ) { this.transferService = blobStoreTransferService; this.blobStoreRepository = blobStoreRepository; this.clusterName = clusterName; this.executorService = threadPool.executor(executor); + this.pathToken = pathToken; } @Override @@ -95,13 +94,10 @@ public String getClusterName() { } public BlobPath getBlobPathPrefix(String clusterUUID) { - return blobStoreRepository.basePath() - .add(RemoteClusterStateUtils.encodeString(getClusterName())) - .add(CLUSTER_STATE_PATH_TOKEN) - .add(clusterUUID); + return blobStoreRepository.basePath().add(encodeString(getClusterName())).add(pathToken).add(clusterUUID); } - public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { BlobPath blobPath = getBlobPathPrefix(obj.clusterUUID()); for (String token : obj.getBlobPathParameters().getPathTokens()) { blobPath = blobPath.add(token); @@ -109,7 +105,7 @@ public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity o return blobPath; } - public BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForDownload(final RemoteWriteableBlobEntity obj) { String[] pathTokens = obj.getBlobPathTokens(); BlobPath blobPath = new BlobPath(); if (pathTokens == null || pathTokens.length < 1) { @@ -122,4 +118,8 @@ public BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity return blobPath; } + private static String encodeString(String content) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); + } + } diff --git a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java index d3d3304cb909a..cfe2bbb85bda4 100644 --- a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java @@ -61,6 +61,13 @@ public void run() { "Time taken to execute timed runnables in this cycle:[{}ms]", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) ); + onComplete(); } + /** + * Callback method that is invoked after all {@link TimeoutAwareRunnable} instances in the batch have been processed. + * By default, this method does nothing, but it can be overridden by subclasses or modified in the implementation if + * there is a need to perform additional actions once the batch execution is completed. 
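The new private `encodeString` above is plain URL-safe, unpadded Base64 over UTF-8 bytes, and the prefix it feeds is simply base path / encoded cluster name / path token / cluster UUID. A runnable pure-JDK illustration that mirrors the helper (the cluster name, token, and UUID values here are made up):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BlobPathPrefixDemo {
    // Mirrors the encodeString helper: URL-safe Base64 without padding.
    static String encodeString(String content) {
        return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        String clusterName = "my-cluster";   // made-up
        String pathToken = "cluster-state";  // stand-in for the injected path token
        String clusterUUID = "uuid-1234";    // made-up
        // base-path / encoded(cluster name) / path token / cluster UUID
        System.out.println(String.join("/", "base", encodeString(clusterName), pathToken, clusterUUID));
    }
}
```

Making the path token a constructor argument, instead of a hard-coded `CLUSTER_STATE_PATH_TOKEN`, is what lets this store move from `gateway.remote.model` into the generic `common.remote` package.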
+ */ + public void onComplete() {} } diff --git a/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java new file mode 100644 index 0000000000000..2d7948d414937 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; + +import java.io.IOException; + +/** + * A bitset backed by a byte array. This will initialize and set bits in the byte array based on the index. + */ +public class ByteArrayBackedBitset { + private final byte[] byteArray; + + /** + * Constructor which uses an on-heap byte array. This should be used during construction of the bitset. + */ + public ByteArrayBackedBitset(int capacity) { + byteArray = new byte[capacity]; + } + + /** + * Constructor which uses Lucene's RandomAccessInput to read the bitset into a read-only buffer. + */ + public ByteArrayBackedBitset(RandomAccessInput in, long offset, int length) throws IOException { + byteArray = new byte[length]; + int i = 0; + while (i < length) { + byteArray[i] = in.readByte(offset + i); + i++; + } + } + + /** + * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer. + */ + public ByteArrayBackedBitset(IndexInput in, int length) throws IOException { + byteArray = new byte[length]; + int i = 0; + while (i < length) { + byteArray[i] = in.readByte(); + i++; + } + } + + /** + * Sets the bit at the given index to 1. + * Each byte holds 8 bits, so the index is divided by 8 to get the byte array index. + * @param index the index to set the bit + */ + public void set(int index) { + int byteArrIndex = index >> 3; + byteArray[byteArrIndex] |= (byte) (1 << (index & 7)); + } + + public int write(IndexOutput output) throws IOException { + int numBytes = 0; + for (byte bitSet : byteArray) { + output.writeByte(bitSet); + numBytes += Byte.BYTES; + } + return numBytes; + } + + /** + * Retrieves whether the bit is set or not at the given index. + * @param index the index to look up for the bit + * @return true if bit is set, false otherwise + */ + public boolean get(int index) throws IOException { + int byteArrIndex = index >> 3; + return (byteArray[byteArrIndex] & (1 << (index & 7))) != 0; + } + + public int getCurrBytesRead() { + return byteArray.length; + } +} diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 9d57e6939e3ae..e2554d61116ad 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -72,6 +72,11 @@ public class FeatureFlags { */ public static final String REMOTE_PUBLICATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.publication.enabled"; + /** + * Gates the functionality of background task execution.
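For the `ByteArrayBackedBitset` added above, the index arithmetic is the usual byte/bit split: `index >> 3` selects the byte and `index & 7` selects the bit inside it. A tiny pure-JDK check of that arithmetic (deliberately not using the OpenSearch class itself):

```java
public class BitsetMathDemo {
    public static void main(String[] args) {
        byte[] bits = new byte[2]; // room for 16 bits
        int index = 11;
        bits[index >> 3] |= (byte) (1 << (index & 7)); // set bit 11: byte 1, bit 3
        boolean isSet = (bits[index >> 3] & (1 << (index & 7))) != 0;
        System.out.println("byte=" + (index >> 3) + " bit=" + (index & 7) + " set=" + isSet);
        // prints: byte=1 bit=3 set=true
    }
}
```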
+ */ + public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = "opensearch.experimental.feature.task.background.enabled"; + public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 998122d9e5c43..d24571fc5778d 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -9,6 +9,7 @@ package org.opensearch.common.xcontent; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.core.xcontent.DeprecationHandler; @@ -23,6 +24,9 @@ import java.math.BigInteger; import java.nio.CharBuffer; import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.LinkedList; /** * JsonToStringXContentParser is the main parser class to transform JSON into string fields in an XContentParser * @@ -32,21 +36,20 @@ */ public class JsonToStringXContentParser extends AbstractXContentParser { private final String fieldTypeName; - private XContentParser parser; + private final XContentParser parser; - private ArrayList valueList = new ArrayList<>(); - private ArrayList valueAndPathList = new ArrayList<>(); - private ArrayList keyList = new ArrayList<>(); + private final ArrayList valueList = new ArrayList<>(); + private final ArrayList valueAndPathList = new ArrayList<>(); + private final ArrayList keyList = new ArrayList<>(); - private XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); + private final XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); - private NamedXContentRegistry xContentRegistry; + private final NamedXContentRegistry xContentRegistry; - private DeprecationHandler deprecationHandler; + private final DeprecationHandler deprecationHandler; private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; private static final String VALUE_SUFFIX = "._value"; - private static final String DOT_SYMBOL = "."; private static final String EQUAL_SYMBOL = "="; public JsonToStringXContentParser( @@ -63,9 +66,14 @@ public JsonToStringXContentParser( } public XContentParser parseObject() throws IOException { + assert currentToken() == Token.START_OBJECT; + parser.nextToken(); // Skip the outer START_OBJECT. Need to return on END_OBJECT.
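+ // (On the loop below: each parseToken(path) call consumes one complete field or value
+ // and leaves the underlying parser positioned on the next token, so iterating until
+ // END_OBJECT visits every top-level field and stops exactly on the outer object's close.)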
+ builder.startObject(); - StringBuilder path = new StringBuilder(fieldTypeName); - parseToken(path, null); + LinkedList path = new LinkedList<>(Collections.singleton(fieldTypeName)); + while (currentToken() != Token.END_OBJECT) { + parseToken(path); + } builder.field(this.fieldTypeName, keyList); builder.field(this.fieldTypeName + VALUE_SUFFIX, valueList); builder.field(this.fieldTypeName + VALUE_AND_PATH_SUFFIX, valueAndPathList); @@ -74,75 +82,55 @@ public XContentParser parseObject() throws IOException { return JsonXContent.jsonXContent.createParser(this.xContentRegistry, this.deprecationHandler, String.valueOf(jString)); } - private void parseToken(StringBuilder path, String currentFieldName) throws IOException { - - while (this.parser.nextToken() != Token.END_OBJECT) { - if (this.parser.currentName() != null) { - currentFieldName = this.parser.currentName(); + private void parseToken(Deque path) throws IOException { + if (this.parser.currentToken() == Token.FIELD_NAME) { + String fieldName = this.parser.currentName(); + path.addLast(fieldName); // Pushing onto the stack *must* be matched by pop + String parts = fieldName; + while (parts.contains(".")) { // Extract the intermediate keys maybe present in fieldName + int dotPos = parts.indexOf('.'); + String part = parts.substring(0, dotPos); + this.keyList.add(part); + parts = parts.substring(dotPos + 1); } - StringBuilder parsedFields = new StringBuilder(); - - if (this.parser.currentToken() == Token.FIELD_NAME) { - path.append(DOT_SYMBOL).append(currentFieldName); - int dotIndex = currentFieldName.indexOf(DOT_SYMBOL); - String fieldNameSuffix = currentFieldName; - // The field name may be of the form foo.bar.baz - // If that's the case, each "part" is a key. - while (dotIndex >= 0) { - String fieldNamePrefix = fieldNameSuffix.substring(0, dotIndex); - if (!fieldNamePrefix.isEmpty()) { - this.keyList.add(fieldNamePrefix); - } - fieldNameSuffix = fieldNameSuffix.substring(dotIndex + 1); - dotIndex = fieldNameSuffix.indexOf(DOT_SYMBOL); - } - if (!fieldNameSuffix.isEmpty()) { - this.keyList.add(fieldNameSuffix); - } - } else if (this.parser.currentToken() == Token.START_ARRAY) { - parseToken(path, currentFieldName); - break; - } else if (this.parser.currentToken() == Token.END_ARRAY) { - // skip - } else if (this.parser.currentToken() == Token.START_OBJECT) { - parseToken(path, currentFieldName); - int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); - - if (dotIndex != -1 && path.length() > currentFieldName.length()) { - path.setLength(path.length() - currentFieldName.length() - 1); - } - } else { - if (!path.toString().contains(currentFieldName)) { - path.append(DOT_SYMBOL).append(currentFieldName); - } - parseValue(parsedFields); - this.valueList.add(parsedFields.toString()); - this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); - int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); - if (dotIndex != -1 && path.length() > currentFieldName.length()) { - path.setLength(path.length() - currentFieldName.length() - 1); - } + this.keyList.add(parts); // parts has no dot, so either it's the original fieldName or it's the last part + this.parser.nextToken(); // advance to the value of fieldName + parseToken(path); // parse the value for fieldName (which will be an array, an object, or a primitive value) + path.removeLast(); // Here is where we pop fieldName from the stack (since we're done with the value of fieldName) + // Note that whichever other branch we just passed through has already ended with nextToken(), so 
we + // don't need to call it. + } else if (this.parser.currentToken() == Token.START_ARRAY) { + parser.nextToken(); + while (this.parser.currentToken() != Token.END_ARRAY) { + parseToken(path); } - + this.parser.nextToken(); + } else if (this.parser.currentToken() == Token.START_OBJECT) { + parser.nextToken(); + while (this.parser.currentToken() != Token.END_OBJECT) { + parseToken(path); + } + this.parser.nextToken(); + } else if (this.parser.currentToken().isValue()) { + String parsedValue = parseValue(); + if (parsedValue != null) { + this.valueList.add(parsedValue); + this.valueAndPathList.add(Strings.collectionToDelimitedString(path, ".") + EQUAL_SYMBOL + parsedValue); + } + this.parser.nextToken(); } } - private void parseValue(StringBuilder parsedFields) throws IOException { + private String parseValue() throws IOException { switch (this.parser.currentToken()) { case VALUE_BOOLEAN: case VALUE_NUMBER: case VALUE_STRING: case VALUE_NULL: - parsedFields.append(this.parser.textOrNull()); - break; + return this.parser.textOrNull(); // Handle other token types as needed - case FIELD_NAME: - case VALUE_EMBEDDED_OBJECT: - case END_ARRAY: - case START_ARRAY: - break; default: - throw new IOException("Unsupported token type [" + parser.currentToken() + "]"); + throw new IOException("Unsupported value token type [" + parser.currentToken() + "]"); } } diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 2748938d8b761..709c0eba4f57f 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Strings; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; @@ -61,6 +60,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -1300,7 +1300,7 @@ public static List collectFileCacheDataPath(NodePath fileCacheNodePath) th * Resolve the custom path for a index's shard. 
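The `Strings` swap in the method that follows is behavior-preserving: log4j's `isNotEmpty` and the negated core `isNullOrEmpty` both treat null and empty alike. The resolved layout itself is simply shared-data-path / custom-path / node-lock-id, as this pure-JDK example with made-up paths shows:

```java
import java.nio.file.Path;

public class CustomShardPathDemo {
    public static void main(String[] args) {
        Path sharedDataPath = Path.of("/var/data/opensearch"); // made-up shared data dir
        String customDataPath = "hot-indices";                 // made-up index.data_path value
        int nodeLockId = 0;
        // Same shape as resolveBaseCustomLocation: shared dir, then custom path, then lock id
        Path base = sharedDataPath.resolve(customDataPath).resolve(Integer.toString(nodeLockId));
        System.out.println(base); // /var/data/opensearch/hot-indices/0
    }
}
```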
*/ public static Path resolveBaseCustomLocation(String customDataPath, Path sharedDataPath, int nodeLockId) { - if (Strings.isNotEmpty(customDataPath)) { + if (!Strings.isNullOrEmpty(customDataPath)) { // This assert is because this should be caught by MetadataCreateIndexService assert sharedDataPath != null; return sharedDataPath.resolve(customDataPath).resolve(Integer.toString(nodeLockId)); diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 0d6af943d39e0..2b6c5e3f5ae53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -47,7 +47,6 @@ import org.opensearch.core.index.shard.ShardId; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -82,23 +81,29 @@ public void allocateUnassigned( executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); } - protected void allocateUnassignedBatchOnTimeout(List shardRoutings, RoutingAllocation allocation, boolean primary) { - Set shardIdsFromBatch = new HashSet<>(); - for (ShardRouting shardRouting : shardRoutings) { - ShardId shardId = shardRouting.shardId(); - shardIdsFromBatch.add(shardId); + protected void allocateUnassignedBatchOnTimeout(Set shardIds, RoutingAllocation allocation, boolean primary) { + if (shardIds.isEmpty()) { + return; } RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); AllocateUnassignedDecision allocationDecision; - if (unassignedShard.primary() == primary && shardIdsFromBatch.contains(unassignedShard.shardId())) { + if (unassignedShard.primary() == primary && shardIds.contains(unassignedShard.shardId())) { + if (isResponsibleFor(unassignedShard) == false) { + continue; + } allocationDecision = AllocateUnassignedDecision.throttle(null); executeDecision(unassignedShard, allocationDecision, allocation, iterator); } } } + /** + * Is the allocator responsible for allocating the given {@link ShardRouting}? + */ + protected abstract boolean isResponsibleFor(ShardRouting shardRouting); + protected void executeDecision( ShardRouting shardRouting, AllocateUnassignedDecision allocateUnassignedDecision, diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index f41545cbdf9bf..dea7ca9a08edd 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -82,7 +82,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? 
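The timeout path above throttles only shards the concrete allocator actually owns, which is why `isResponsibleFor` changes from `static` to an instance method in the hunks below: the abstract base class needs virtual dispatch to invoke the primary- or replica-specific check. A self-contained sketch of that dispatch pattern, with toy string shards standing in for `ShardRouting`:

```java
// Template-method sketch: the base class calls a hook the subclass defines.
abstract class AllocatorSketch {
    protected abstract boolean isResponsibleFor(String shard);

    void onTimeout(Iterable<String> unassigned) {
        for (String shard : unassigned) {
            if (isResponsibleFor(shard) == false) {
                continue; // not ours: leave it for the other allocator
            }
            System.out.println("throttle " + shard);
        }
    }
}

class PrimaryAllocatorSketch extends AllocatorSketch {
    @Override
    protected boolean isResponsibleFor(String shard) {
        return shard.startsWith("p-"); // toy stand-in for "primary and unassigned"
    }
}
```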
*/ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() // must be primary && shard.unassigned() // must be unassigned // only handle either an existing store or a snapshot recovery diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index aaf0d696e1444..c30ee8479ac97 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -191,7 +191,7 @@ public void processExistingRecoveries(RoutingAllocation allocation) { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() == false // must be a replica && shard.unassigned() // must be unassigned // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index 0818b187271cb..020a543ac5fc5 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -173,7 +173,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( RoutingAllocation allocation, Supplier> nodeStoreFileMetaDataMapSupplier ) { - if (!isResponsibleFor(shardRouting)) { + if (isResponsibleFor(shardRouting) == false) { return AllocateUnassignedDecision.NOT_TAKEN; } Tuple> result = canBeAllocatedToAtLeastOneNode(shardRouting, allocation); diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 6c6b1126a78d6..d18304ea73ed0 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -277,17 +277,14 @@ protected BatchRunnableExecutor innerAllocateUnassignedBatch( } List runnables = new ArrayList<>(); if (primary) { + Set timedOutPrimaryShardIds = new HashSet<>(); batchIdToStartedShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) .forEach(shardsBatch -> runnables.add(new TimeoutAwareRunnable() { @Override public void onTimeout() { - primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout( - shardsBatch.getBatchedShardRoutings(), - allocation, - true - ); + timedOutPrimaryShardIds.addAll(shardsBatch.getBatchedShards()); } @Override @@ -295,15 +292,22 @@ public void run() { primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation); } })); - return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout); + return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering oncomplete after timeout for [{}] primary shards", timedOutPrimaryShardIds.size()); + primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutPrimaryShardIds, allocation, true); + } + }; } else { + Set timedOutReplicaShardIds = new 
HashSet<>(); batchIdToStoreShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) .forEach(batch -> runnables.add(new TimeoutAwareRunnable() { @Override public void onTimeout() { - replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(batch.getBatchedShardRoutings(), allocation, false); + timedOutReplicaShardIds.addAll(batch.getBatchedShards()); } @Override @@ -311,7 +315,13 @@ public void run() { replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation); } })); - return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout); + return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering oncomplete after timeout for [{}] replica shards", timedOutReplicaShardIds.size()); + replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutReplicaShardIds, allocation, false); + } + }; } } @@ -846,11 +856,11 @@ public int getNumberOfStoreShardBatches() { return batchIdToStoreShardBatch.size(); } - private void setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatewayAllocatorTimeout) { + protected void setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatewayAllocatorTimeout) { this.primaryShardsBatchGatewayAllocatorTimeout = primaryShardsBatchGatewayAllocatorTimeout; } - private void setReplicaBatchAllocatorTimeout(TimeValue replicaShardsBatchGatewayAllocatorTimeout) { + protected void setReplicaBatchAllocatorTimeout(TimeValue replicaShardsBatchGatewayAllocatorTimeout) { this.replicaShardsBatchGatewayAllocatorTimeout = replicaShardsBatchGatewayAllocatorTimeout; } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java index 67ac8d2b9a810..877e2585cb1eb 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java @@ -11,12 +11,12 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.gateway.remote.model.RemoteClusterBlocks; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; import org.opensearch.gateway.remote.model.RemoteReadResult; @@ -28,7 +28,7 @@ import java.util.Map; /** - * A Manager which provides APIs to upload and download attributes of ClusterState to the {@link RemoteClusterStateBlobStore} + * A Manager which provides APIs to upload and download attributes of ClusterState to the {@link RemoteWriteableEntityBlobStore} * * @opensearch.internal */ @@ -47,32 +47,35 @@ public class RemoteClusterStateAttributesManager extends AbstractRemoteWritableE ) { this.remoteWritableEntityStores.put( 
RemoteDiscoveryNodes.DISCOVERY_NODES, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteClusterBlocks.CLUSTER_BLOCKS, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); } @@ -80,7 +83,7 @@ public class RemoteClusterStateAttributesManager extends AbstractRemoteWritableE @Override protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( @@ -92,7 +95,7 @@ protected ActionListener getWrappedWriteListener( @Override protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java index 8691187c7fbfa..02db15477ff95 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Strings; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; @@ -23,6 +22,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.threadpool.ThreadPool; @@ -125,8 +125,8 @@ void cleanUpStaleFiles() { ClusterState currentAppliedState = clusterApplierService.state(); if (currentAppliedState.nodes().isLocalNodeElectedClusterManager()) { long cleanUpAttemptStateVersion = currentAppliedState.version(); - assert Strings.isNotEmpty(currentAppliedState.getClusterName().value()) : "cluster name is not set"; - assert Strings.isNotEmpty(currentAppliedState.metadata().clusterUUID()) : "cluster uuid is not set"; + assert !Strings.isNullOrEmpty(currentAppliedState.getClusterName().value()) : "cluster name is not set"; + assert !Strings.isNullOrEmpty(currentAppliedState.metadata().clusterUUID()) : "cluster uuid is not set"; if (cleanUpAttemptStateVersion - 
lastCleanupAttemptStateVersion > SKIP_CLEANUP_STATE_CHANGES) { logger.info( "Cleaning up stale remote state files for cluster [{}] with uuid [{}]. Last clean was done before {} updates", diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 674279f2251bd..910f601a81ca8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -603,7 +603,7 @@ UploadedMetadataResults writeMetadataInParallel( new RemoteDiscoveryNodes( clusterState.nodes(), clusterState.version(), - clusterState.stateUUID(), + clusterState.metadata().clusterUUID(), blobStoreRepository.getCompressor() ), listener diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java index 5a6f4b7e9f1f1..763a8e3ff4951 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java @@ -16,8 +16,9 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.metadata.Metadata.XContentContext; import org.opensearch.cluster.metadata.TemplatesMetadata; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -26,7 +27,6 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; import org.opensearch.gateway.remote.model.RemoteCustomMetadata; import org.opensearch.gateway.remote.model.RemoteGlobalMetadata; @@ -85,72 +85,79 @@ public class RemoteGlobalMetadataManager extends AbstractRemoteWritableEntityMan this.namedWriteableRegistry = namedWriteableRegistry; this.remoteWritableEntityStores.put( RemoteGlobalMetadata.GLOBAL_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteCoordinationMetadata.COORDINATION_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemotePersistentSettingsMetadata.SETTING_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + 
RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteTemplatesMetadata.TEMPLATES_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteCustomMetadata.CUSTOM_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); @@ -159,7 +166,7 @@ public class RemoteGlobalMetadataManager extends AbstractRemoteWritableEntityMan @Override protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( @@ -171,7 +178,7 @@ protected ActionListener getWrappedWriteListener( @Override protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java index c30721c8f625c..d1f08a7c2a33d 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java @@ -9,15 +9,15 @@ package org.opensearch.gateway.remote; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteIndexMetadata; 
import org.opensearch.gateway.remote.model.RemoteReadResult; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -58,12 +58,13 @@ public RemoteIndexMetadataManager( ) { this.remoteWritableEntityStores.put( RemoteIndexMetadata.INDEX, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); @@ -106,7 +107,7 @@ private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeo @Override protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( @@ -118,7 +119,7 @@ protected ActionListener getWrappedWriteListener( @Override protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java index cb09de1a6ec44..0ccadd7dd18da 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java @@ -16,6 +16,7 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; @@ -23,7 +24,6 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -62,7 +62,7 @@ public class RemoteManifestManager { private volatile TimeValue metadataManifestUploadTimeout; private final String nodeId; - private final RemoteClusterStateBlobStore manifestBlobStore; + private final RemoteWriteableEntityBlobStore manifestBlobStore; private final Compressor compressor; private final NamedXContentRegistry namedXContentRegistry; // todo remove blobStorerepo from here @@ -78,12 +78,13 @@ public class RemoteManifestManager { ) { this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); this.nodeId = nodeId; - this.manifestBlobStore = new RemoteClusterStateBlobStore<>( + this.manifestBlobStore = new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ); ; clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); 
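Each registration above now spells out the same seven arguments, differing only in the entity key and, potentially, the path token. If one wanted to compress that repetition, a small factory along these lines would do; this helper is hypothetical and not part of the change:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Hypothetical registry sketch; S stands in for the blob-store type.
class StoreRegistrySketch<S> {
    private final Map<String, S> stores = new HashMap<>();
    private final Function<String, S> factory; // path token -> configured store

    StoreRegistrySketch(Function<String, S> factory) {
        this.factory = factory;
    }

    // Register several entity keys, all backed by stores sharing one path token.
    void registerAll(String pathToken, String... entityKeys) {
        for (String key : entityKeys) {
            stores.put(key, factory.apply(pathToken));
        }
    }
}
```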
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java index 9c5fbd5941640..101daaa143a66 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link ClusterBlocks} to/from remote blob store */ -public class RemoteClusterBlocks extends AbstractRemoteWritableBlobEntity { +public class RemoteClusterBlocks extends AbstractClusterMetadataWriteableBlobEntity { public static final String CLUSTER_BLOCKS = "blocks"; public static final ChecksumWritableBlobStoreFormat CLUSTER_BLOCKS_FORMAT = new ChecksumWritableBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java index acaae3173315a..5f79b690af574 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link ClusterMetadataManifest} to/from remote blob store */ -public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEntity { +public class RemoteClusterMetadataManifest extends AbstractClusterMetadataWriteableBlobEntity { public static final String MANIFEST = "manifest"; public static final int SPLITTED_MANIFEST_FILE_LENGTH = 6; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java index affbc7ba66cb8..e5e44525520f4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java @@ -11,7 +11,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; @@ -32,7 +32,7 @@ /** * Wrapper class for uploading/downloading {@link Custom} 
to/from remote blob store */ -public class RemoteClusterStateCustoms extends AbstractRemoteWritableBlobEntity { +public class RemoteClusterStateCustoms extends AbstractClusterMetadataWriteableBlobEntity { public static final String CLUSTER_STATE_CUSTOM = "cluster-state-custom"; public final ChecksumWritableBlobStoreFormat clusterStateCustomsFormat; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java index a90721ab59f66..63cc96e3e02c4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading {@link CoordinationMetadata} to/from remote blob store */ -public class RemoteCoordinationMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteCoordinationMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String COORDINATION_METADATA = "coordination"; public static final ChecksumBlobStoreFormat COORDINATION_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java index ec5dfbec820d4..8e850e903954a 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -32,7 +32,7 @@ /** * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store */ -public class RemoteCustomMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteCustomMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String CUSTOM_METADATA = "custom"; public static final String CUSTOM_DELIMITER = "--"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java index fb399e2899cdd..446207a767009 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import 
org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link DiscoveryNodes} to/from remote blob store */ -public class RemoteDiscoveryNodes extends AbstractRemoteWritableBlobEntity { +public class RemoteDiscoveryNodes extends AbstractClusterMetadataWriteableBlobEntity { public static final String DISCOVERY_NODES = "nodes"; public static final ChecksumWritableBlobStoreFormat DISCOVERY_NODES_FORMAT = new ChecksumWritableBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java index 09f07de0d5c24..0082f873f8dba 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -25,7 +25,7 @@ /** * Wrapper class for uploading/downloading global metadata ({@link Metadata}) to/from remote blob store */ -public class RemoteGlobalMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteGlobalMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String GLOBAL_METADATA = "global_metadata"; public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java index 1debf75cdfec9..dee48237e5c4c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; @@ -28,7 +28,7 @@ /** * Wrapper class for uploading/downloading {@link DiffableStringMap} to/from remote blob store */ -public class RemoteHashesOfConsistentSettings extends AbstractRemoteWritableBlobEntity { +public class RemoteHashesOfConsistentSettings extends AbstractClusterMetadataWriteableBlobEntity { public static final String HASHES_OF_CONSISTENT_SETTINGS = "hashes-of-consistent-settings"; public static final ChecksumWritableBlobStoreFormat HASHES_OF_CONSISTENT_SETTINGS_FORMAT = new ChecksumWritableBlobStoreFormat<>("hashes-of-consistent-settings", DiffableStringMap::readFrom); diff --git 
a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java index 830b09b92e2cb..5308f92c633b1 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link IndexMetadata} to/from remote blob store */ -public class RemoteIndexMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteIndexMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 2; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java index 9ee3db8f289e3..81042f289254c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.common.settings.Settings; import org.opensearch.core.compress.Compressor; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading persistent {@link Settings} to/from remote blob store */ -public class RemotePersistentSettingsMetadata extends AbstractRemoteWritableBlobEntity { +public class RemotePersistentSettingsMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String SETTING_METADATA = "settings"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java index 7c4a5bf2236a1..1a28c97dc8a77 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java @@ -9,10 +9,13 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; +import org.opensearch.common.remote.RemoteWriteableBlobEntity; import org.opensearch.common.remote.RemoteWriteableEntity; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import 
org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; @@ -28,8 +31,8 @@ * @param which can be uploaded to / downloaded from blob store * @param The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for IndexRoutingTable entity. */ -public class RemoteRoutingTableBlobStore> extends - RemoteClusterStateBlobStore { +public class RemoteRoutingTableBlobStore> extends + RemoteWriteableEntityBlobStore { /** * This setting is used to set the remote routing table store blob store path type strategy. @@ -66,7 +69,14 @@ public RemoteRoutingTableBlobStore( String executor, ClusterSettings clusterSettings ) { - super(blobStoreTransferService, blobStoreRepository, clusterName, threadPool, executor); + super( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadPool, + executor, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN + ); this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING); this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); @@ -74,7 +84,7 @@ public RemoteRoutingTableBlobStore( } @Override - public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { assert obj.getBlobPathParameters().getPathTokens().size() == 1 : "Unexpected tokens in RemoteRoutingTableObject"; BlobPath indexRoutingPath = getBlobPathPrefix(obj.clusterUUID()).add(INDEX_ROUTING_TABLE); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java index 4513d35aef5e8..6ae8a533f9a21 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.TemplatesMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading {@link TemplatesMetadata} to/from remote blob store */ -public class RemoteTemplatesMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteTemplatesMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String TEMPLATES_METADATA = "templates"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java index fd0526f05d015..d4f3837f80084 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import 
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java
index 4513d35aef5e8..6ae8a533f9a21 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java
@@ -10,7 +10,7 @@
 
 import org.opensearch.cluster.metadata.TemplatesMetadata;
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.core.compress.Compressor;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
@@ -31,7 +31,7 @@
 /**
  * Wrapper class for uploading/downloading {@link TemplatesMetadata} to/from remote blob store
  */
-public class RemoteTemplatesMetadata extends AbstractRemoteWritableBlobEntity<TemplatesMetadata> {
+public class RemoteTemplatesMetadata extends AbstractClusterMetadataWriteableBlobEntity<TemplatesMetadata> {
 
     public static final String TEMPLATES_METADATA = "templates";
 
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java
index fd0526f05d015..d4f3837f80084 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java
@@ -9,7 +9,7 @@
 package org.opensearch.gateway.remote.model;
 
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.compress.Compressor;
@@ -32,7 +32,7 @@
 /**
  * Wrapper class for uploading/downloading transient {@link Settings} to/from remote blob store
  */
-public class RemoteTransientSettingsMetadata extends AbstractRemoteWritableBlobEntity<Settings> {
+public class RemoteTransientSettingsMetadata extends AbstractClusterMetadataWriteableBlobEntity<Settings> {
 
     public static final String TRANSIENT_SETTING_METADATA = "transient-settings";
 
diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java
index 40b5bafde2b13..46c5074c48eb8 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java
@@ -10,7 +10,7 @@
 
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.core.compress.Compressor;
 import org.opensearch.core.index.Index;
@@ -27,7 +27,7 @@
 /**
  * Remote store object for IndexRoutingTable
  */
-public class RemoteIndexRoutingTable extends AbstractRemoteWritableBlobEntity<IndexRoutingTable> {
+public class RemoteIndexRoutingTable extends AbstractClusterMetadataWriteableBlobEntity<IndexRoutingTable> {
 
     public static final String INDEX_ROUTING_TABLE = "index-routing";
     public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--";
 
diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java
index e876d939490d0..2370417dc14df 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java
@@ -12,7 +12,7 @@
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.cluster.routing.RoutingTableIncrementalDiff;
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.core.compress.Compressor;
 import org.opensearch.gateway.remote.ClusterMetadataManifest;
@@ -30,7 +30,7 @@
  * Represents a incremental difference between {@link org.opensearch.cluster.routing.RoutingTable} objects that can be serialized and deserialized.
  * This class is responsible for writing and reading the differences between RoutingTables to and from an input/output stream.
  */
-public class RemoteRoutingTableDiff extends AbstractRemoteWritableBlobEntity<RoutingTableIncrementalDiff> {
+public class RemoteRoutingTableDiff extends AbstractClusterMetadataWriteableBlobEntity<RoutingTableIncrementalDiff> {
 
     private final RoutingTableIncrementalDiff routingTableIncrementalDiff;
 
     private long term;
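RemoteRoutingTableDiff, the last file above, wraps a RoutingTableIncrementalDiff: instead of re-uploading the full routing table on every publication, only the entries that changed between two cluster states are serialized. A self-contained sketch of that upsert/delete shape, using index names mapped to an integer version as a deliberately crude stand-in for IndexRoutingTable objects (all names here are illustrative, not the OpenSearch API):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RoutingDiffSketch {

    /** Upserts plus deletes: the minimal information needed to go from one table to the next. */
    record Diff(Map<String, Integer> upserts, List<String> deletes) {}

    static Diff diff(Map<String, Integer> before, Map<String, Integer> after) {
        Map<String, Integer> upserts = new HashMap<>();
        after.forEach((index, version) -> {
            if (!version.equals(before.get(index))) {
                upserts.put(index, version); // new index, or its routing changed
            }
        });
        List<String> deletes = before.keySet().stream().filter(index -> !after.containsKey(index)).toList();
        return new Diff(upserts, deletes);
    }

    static Map<String, Integer> apply(Map<String, Integer> before, Diff diff) {
        Map<String, Integer> next = new HashMap<>(before);
        next.putAll(diff.upserts());
        diff.deletes().forEach(next::remove);
        return next;
    }

    public static void main(String[] args) {
        Map<String, Integer> v1 = Map.of("logs", 1, "metrics", 1);
        Map<String, Integer> v2 = Map.of("logs", 2, "traces", 1);
        Diff d = diff(v1, v2);
        System.out.println(d);                        // e.g. Diff[upserts={logs=2, traces=1}, deletes=[metrics]]
        System.out.println(apply(v1, d).equals(v2));  // true
    }
}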