diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8076adcf00ca9..68d02d5f7d544 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1 +1,27 @@
-* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
+# CODEOWNERS manages notifications, not PR approvals
+# For PR approvals see /.github/workflows/maintainer-approval.yml
+
+# Files have a single rule applied, the last match decides the owner
+# If you would like to more specifically apply ownership, include existing owner in new sub fields
+
+# To verify changes of CODEOWNERS file
+# In VSCode
+# 1. Install extension https://marketplace.visualstudio.com/items?itemName=jasonnutter.vscode-codeowners
+# 2. Go to a file
+# 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file.
+
+# Default ownership for all repo files
+* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
+
+/modules/transport-netty4/ @peternied
+
+/plugins/identity-shiro/ @peternied
+
+/server/src/main/java/org/opensearch/extensions/ @peternied
+/server/src/main/java/org/opensearch/identity/ @peternied
+/server/src/main/java/org/opensearch/threadpool/ @peternied
+/server/src/main/java/org/opensearch/transport/ @peternied
+
+/.github/ @peternied
+
+/MAINTAINERS.md @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
diff --git a/.github/ISSUE_TEMPLATE/bug_template.md b/.github/ISSUE_TEMPLATE/bug_template.md
deleted file mode 100644
index be3ae51b237ee..0000000000000
--- a/.github/ISSUE_TEMPLATE/bug_template.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-name: 🐛 Bug report
-about: Create a report to help us improve
-title: "[BUG]"
-labels: 'bug, untriaged'
-assignees: ''
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Plugins**
-Please list all plugins currently enabled.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Host/Environment (please complete the following information):**
- - OS: [e.g. iOS]
- - Version [e.g. 22]
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/bug_template.yml b/.github/ISSUE_TEMPLATE/bug_template.yml
new file mode 100644
index 0000000000000..2cd1ee8a7e688
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_template.yml
@@ -0,0 +1,79 @@
+name: 🐛 Bug report
+description: Create a report to help us improve
+title: "[BUG] "
+labels: ['bug, untriaged']
+body:
+ - type: textarea
+ attributes:
+ label: Describe the bug
+ description: A clear and concise description of what the bug is.
+ validations:
+ required: true
+ - type: dropdown
+ attributes:
+ label: Related component
+ description: Choose a specific OpenSearch component your bug belongs to. If you are unsure which to select or if the component is not present, select "Other".
+ multiple: false
+ options:
+ - Other
+ - Build
+ - Clients
+ - Cluster Manager
+ - Extensions
+ - Indexing:Performance
+ - Indexing:Replication
+ - Indexing
+ - Libraries
+ - Plugins
+ - Search:Aggregations
+ - Search:Performance
+ - Search:Query Capabilities
+ - Search:Query Insights
+ - Search:Relevance
+ - Search:Remote Search
+ - Search:Resiliency
+ - Search:Searchable Snapshots
+ - Search
+ - Storage:Durability
+ - Storage:Performance
+ - Storage:Remote
+ - Storage:Snapshots
+ - Storage
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: To Reproduce
+ description: Steps to reproduce the behavior.
+ value: |
+ 1. Go to '...'
+ 2. Click on '....'
+ 3. Scroll down to '....'
+ 4. See error
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: Expected behavior
+ description: A clear and concise description of what you expected to happen.
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: Additional Details
+ description: Add any other context about the problem here.
+ value: |
+ **Plugins**
+ Please list all plugins currently enabled.
+
+ **Screenshots**
+ If applicable, add screenshots to help explain your problem.
+
+ **Host/Environment (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Version [e.g. 22]
+
+ **Additional context**
+ Add any other context about the problem here.
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 53b3614a34342..0000000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-name: 🎆 Feature request
-about: Suggest an idea for this project
-title: ''
-labels: 'enhancement, untriaged'
-assignees: ''
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000000000..d93ac8b590706
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,62 @@
+name: 🎆 Feature request
+description: Suggest an idea for this project
+title: '[Feature Request] '
+labels: ['enhancement, untriaged']
+body:
+ - type: textarea
+ attributes:
+ label: Is your feature request related to a problem? Please describe
+ description: A clear and concise description of what the problem is.
+ placeholder: Ex. I'm always frustrated when [...]
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: Describe the solution you'd like
+ description: A clear and concise description of what you want to happen.
+ validations:
+ required: true
+ - type: dropdown
+ attributes:
+ label: Related component
+ description: Choose a specific OpenSearch component your feature request belongs to. If you are unsure of which component to select or if the component is not present, select "Other".
+ multiple: false
+ options:
+ - Other
+ - Build
+ - Clients
+ - Cluster Manager
+ - Extensions
+ - Indexing:Performance
+ - Indexing:Replication
+ - Indexing
+ - Libraries
+ - Plugins
+ - Search:Aggregations
+ - Search:Performance
+ - Search:Query Capabilities
+ - Search:Query Insights
+ - Search:Relevance
+ - Search:Remote Search
+ - Search:Resiliency
+ - Search:Searchable Snapshots
+ - Search
+ - Storage:Durability
+ - Storage:Performance
+ - Storage:Remote
+ - Storage:Snapshots
+ - Storage
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: Describe alternatives you've considered
+ description: A clear and concise description of any alternative solutions or features you've considered.
+ validations:
+ required: false
+ - type: textarea
+ attributes:
+ label: Additional context
+ description: Add any other context or screenshots about the feature request here.
+ validations:
+ required: false
diff --git a/.github/workflows/add-untriaged.yml b/.github/workflows/add-untriaged.yml
deleted file mode 100644
index 38de96f663051..0000000000000
--- a/.github/workflows/add-untriaged.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Apply 'untriaged' label during issue lifecycle
-
-on:
- issues:
- types: [opened, reopened, transferred]
-
-jobs:
- apply-label:
- if: github.repository == 'opensearch-project/OpenSearch'
- runs-on: ubuntu-latest
- steps:
- - uses: actions/github-script@v7
- with:
- script: |
- github.rest.issues.addLabels({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- labels: ['untriaged']
- })
diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml
index d93f7e73b91e7..d6c65ddd446cd 100644
--- a/.github/workflows/check-compatibility.yml
+++ b/.github/workflows/check-compatibility.yml
@@ -36,7 +36,7 @@ jobs:
echo "### Compatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Compatible component' $HOME/gradlew-check.out | sed -e 's/Compatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt"
- name: Upload results
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: results.txt
path: ${{ github.workspace }}/results.txt
@@ -48,7 +48,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download results
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
with:
name: results.txt
diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml
new file mode 100644
index 0000000000000..2f87afd372d90
--- /dev/null
+++ b/.github/workflows/maintainer-approval.yml
@@ -0,0 +1,33 @@
+name: Maintainers approval
+
+on:
+ pull_request_review:
+ types: [submitted]
+
+jobs:
+ maintainer-approved-check:
+ name: Minimum approval count
+ runs-on: ubuntu-latest
+ steps:
+ - id: find-maintainers
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ result-encoding: string
+ script: |
+ // Get the collaborators - filtered to maintainer permissions
+ const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ permission: 'maintain',
+ affiliation: 'all',
+ per_page: 100
+ });
+
+ return maintainersResponse.data.map(item => item.login).join(', ');
+
+ - uses: peternied/required-approval@v1.2
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ min-required: 1
+ required-approvers-list: ${{ steps.find-maintainers.outputs.result }}
diff --git a/.github/workflows/stalled.yml b/.github/workflows/stalled.yml
index 19ec9c9438bbe..d171332b402f1 100644
--- a/.github/workflows/stalled.yml
+++ b/.github/workflows/stalled.yml
@@ -17,7 +17,7 @@ jobs:
private_key: ${{ secrets.APP_PRIVATE_KEY }}
installation_id: 22958780
- name: Stale PRs
- uses: actions/stale@v8
+ uses: actions/stale@v9
with:
repo-token: ${{ steps.github_app_token.outputs.token }}
stale-pr-label: 'stalled'
diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml
new file mode 100644
index 0000000000000..c305818bdb0a9
--- /dev/null
+++ b/.github/workflows/triage.yml
@@ -0,0 +1,34 @@
+name: Auto triage based on the component label in issue
+
+on:
+ issues:
+ types: [opened, reopened, transferred]
+
+jobs:
+ apply-label:
+ if: github.repository == 'opensearch-project/OpenSearch'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/github-script@v7
+ with:
+ script: |
+ const { issue, repository } = context.payload;
+ const { number, body } = issue;
+ const { owner, name } = repository;
+ const regex = /###\sRelated\scomponent\n\n(\w.*)\n/gm;
+ let match;
+ while ( ( match = regex.exec( body ) ) ) {
+ const [ , component_label ] = match;
+ await github.rest.issues.addLabels( {
+ owner: owner.login,
+ repo: name,
+ issue_number: number,
+ labels: [ `${ component_label }` ],
+ } );
+ }
+ github.rest.issues.addLabels({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ['untriaged']
+ })
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a1a864cbd2e33..070f1d819ff90 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,7 +9,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636))
- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151))
- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854))
-- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107))
- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664))
- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618))
- [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286))
@@ -45,7 +44,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963))
-- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822))
- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147))
- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617))
- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305))
@@ -58,7 +56,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855))
- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/))
-- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558))
- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894))
@@ -84,8 +81,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
-- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873))
-- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249))
+- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439))
### Security
@@ -121,18 +117,23 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317))
- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069))
- [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175))
+- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378))
+- Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170))
+- Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591))
+- Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583))
### Dependencies
+- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822))
- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276))
-- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446))
+- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560))
- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
-- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295))
+- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.6.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630))
- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))
- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508))
- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639))
- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635))
-- Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637))
+- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632))
- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270))
- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504))
- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171))
@@ -143,30 +144,42 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861))
- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344))
- Bump `reactor-netty-core` from 1.1.12 to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350))
-- Bump `com.gradle.enterprise` from 3.14.1 to 3.15.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339))
+- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629))
- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447))
- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450))
- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445))
- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448))
+- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521))
+- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539))
+- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555))
+- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556))
+- Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557))
+- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671))
+- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996))
+- Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559))
### Changed
- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036))
- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562))
+- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107))
- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598))
- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524))
- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642))
- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395))
+- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558))
- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273))
- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895))
-- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023))
- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057))
+- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023))
- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083))
- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087))
- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528))
+- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312))
- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308))
- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362))
-- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312))
+- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512))
+- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562))
### Deprecated
@@ -179,14 +192,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))
- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737))
-- Fix SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068))
- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543))
- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934))
-- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152))
+- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068))
+- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873))
+- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249))
+- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316))
- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369))
- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167))
- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238))
+- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152))
- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417))
+- Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609))
### Security
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index b8db8504d5b85..40ab0801223b1 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -103,10 +103,10 @@ dependencies {
api localGroovy()
api 'commons-codec:commons-codec:1.16.0'
- api 'org.apache.commons:commons-compress:1.24.0'
+ api 'org.apache.commons:commons-compress:1.25.0'
api 'org.apache.ant:ant:1.10.14'
api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0'
- api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0'
+ api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0'
api 'com.netflix.nebula:gradle-info-plugin:12.1.6'
api 'org.apache.rat:apache-rat:0.15'
api 'commons-io:commons-io:2.15.1'
@@ -122,21 +122,17 @@ dependencies {
api 'org.jruby.jcodings:jcodings:1.0.58'
api 'org.jruby.joni:joni:2.2.1'
api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}"
- api "org.ajoberstar.grgit:grgit-core:5.2.0"
+ api "org.ajoberstar.grgit:grgit-core:5.2.1"
testFixturesApi "junit:junit:${props.getProperty('junit')}"
testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
testFixturesApi gradleApi()
testFixturesApi gradleTestKit()
- testImplementation 'org.wiremock:wiremock-standalone:3.1.0'
+ testImplementation 'org.wiremock:wiremock-standalone:3.3.1'
testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}"
integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') {
exclude module: "groovy"
}
- implementation('org.ajoberstar.grgit:grgit-core:5.2.0') {
- exclude group: 'org.eclipse.jgit', module: 'org.eclipse.jgit'
- }
- implementation 'org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r'
}
configurations.all {
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
index ff3c322c5ccf7..f18df65dfddfa 100644
--- a/client/rest/build.gradle
+++ b/client/rest/build.gradle
@@ -34,8 +34,8 @@ apply plugin: 'opensearch.build'
apply plugin: 'opensearch.publish'
java {
- targetCompatibility = JavaVersion.VERSION_11
- sourceCompatibility = JavaVersion.VERSION_11
+ targetCompatibility = JavaVersion.VERSION_1_8
+ sourceCompatibility = JavaVersion.VERSION_1_8
}
base {
@@ -109,3 +109,10 @@ thirdPartyAudit.ignoreMissingClasses(
'javax.servlet.ServletContextEvent',
'javax.servlet.ServletContextListener'
)
+
+tasks.withType(JavaCompile) {
+ // Suppressing '[options] target value 8 is obsolete and will be removed in a future release'
+ configure(options) {
+ options.compilerArgs << '-Xlint:-options'
+ }
+}
diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java
index 7691c01daefea..15905add76c4f 100644
--- a/client/rest/src/main/java/org/opensearch/client/RestClient.java
+++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java
@@ -1116,9 +1116,15 @@ public long getContentLength() {
if (chunkedEnabled.get()) {
return -1L;
} else {
- long size;
+ long size = 0;
+ final byte[] buf = new byte[8192];
+ int nread = 0;
+
try (InputStream is = getContent()) {
- size = is.readAllBytes().length;
+ // read to EOF which may read more or less than buffer size
+ while ((nread = is.read(buf)) > 0) {
+ size += nread;
+ }
} catch (IOException ex) {
size = -1L;
}
diff --git a/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java b/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java
index 6a4b176edd011..fdfe49ca901c9 100644
--- a/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java
+++ b/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java
@@ -35,34 +35,34 @@ public void tearDown() {
}
public void testConsumerAllocatesBufferLimit() throws IOException {
- consumer.consume(randomByteBufferOfLength(1000).flip());
+ consumer.consume((ByteBuffer) randomByteBufferOfLength(1000).flip());
assertThat(consumer.getBuffer().capacity(), equalTo(1000));
}
public void testConsumerAllocatesEmptyBuffer() throws IOException {
- consumer.consume(ByteBuffer.allocate(0).flip());
+ consumer.consume((ByteBuffer) ByteBuffer.allocate(0).flip());
assertThat(consumer.getBuffer().capacity(), equalTo(0));
}
public void testConsumerExpandsBufferLimits() throws IOException {
- consumer.consume(randomByteBufferOfLength(1000).flip());
- consumer.consume(randomByteBufferOfLength(2000).flip());
- consumer.consume(randomByteBufferOfLength(3000).flip());
+ consumer.consume((ByteBuffer) randomByteBufferOfLength(1000).flip());
+ consumer.consume((ByteBuffer) randomByteBufferOfLength(2000).flip());
+ consumer.consume((ByteBuffer) randomByteBufferOfLength(3000).flip());
assertThat(consumer.getBuffer().capacity(), equalTo(6000));
}
public void testConsumerAllocatesLimit() throws IOException {
- consumer.consume(randomByteBufferOfLength(BUFFER_LIMIT).flip());
+ consumer.consume((ByteBuffer) randomByteBufferOfLength(BUFFER_LIMIT).flip());
assertThat(consumer.getBuffer().capacity(), equalTo(BUFFER_LIMIT));
}
public void testConsumerFailsToAllocateOverLimit() throws IOException {
- assertThrows(ContentTooLongException.class, () -> consumer.consume(randomByteBufferOfLength(BUFFER_LIMIT + 1).flip()));
+ assertThrows(ContentTooLongException.class, () -> consumer.consume((ByteBuffer) randomByteBufferOfLength(BUFFER_LIMIT + 1).flip()));
}
public void testConsumerFailsToExpandOverLimit() throws IOException {
- consumer.consume(randomByteBufferOfLength(BUFFER_LIMIT).flip());
- assertThrows(ContentTooLongException.class, () -> consumer.consume(randomByteBufferOfLength(1).flip()));
+ consumer.consume((ByteBuffer) randomByteBufferOfLength(BUFFER_LIMIT).flip());
+ assertThrows(ContentTooLongException.class, () -> consumer.consume((ByteBuffer) randomByteBufferOfLength(1).flip()));
}
private static ByteBuffer randomByteBufferOfLength(int length) {
diff --git a/client/test/build.gradle b/client/test/build.gradle
index f81a009389681..b77865df6decf 100644
--- a/client/test/build.gradle
+++ b/client/test/build.gradle
@@ -30,8 +30,8 @@
apply plugin: 'opensearch.build'
java {
- targetCompatibility = JavaVersion.VERSION_11
- sourceCompatibility = JavaVersion.VERSION_11
+ targetCompatibility = JavaVersion.VERSION_1_8
+ sourceCompatibility = JavaVersion.VERSION_1_8
}
base {
@@ -69,3 +69,10 @@ dependenciesInfo.enabled = false
//we aren't releasing this jar
thirdPartyAudit.enabled = false
test.enabled = false
+
+tasks.withType(JavaCompile) {
+ // Suppressing '[options] target value 8 is obsolete and will be removed in a future release'
+ configure(options) {
+ options.compilerArgs << '-Xlint:-options'
+ }
+}
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index cb05661dc74a4..ededa7bff34d8 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -63,7 +63,7 @@ import java.util.regex.Pattern
*/
plugins {
- id "com.netflix.nebula.ospackage-base" version "11.5.0"
+ id "com.netflix.nebula.ospackage-base" version "11.6.0"
}
void addProcessFilesTask(String type, boolean jdk) {
diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options
index 1a0abcbaf9c88..f0ac98faffda9 100644
--- a/distribution/src/config/jvm.options
+++ b/distribution/src/config/jvm.options
@@ -38,12 +38,12 @@
8-10:-XX:+UseCMSInitiatingOccupancyOnly
## G1GC Configuration
-# NOTE: G1 GC is only supported on JDK version 10 or later
-# to use G1GC, uncomment the next two lines and update the version on the
-# following three lines to your version of the JDK
-# 10:-XX:-UseConcMarkSweepGC
-# 10:-XX:-UseCMSInitiatingOccupancyOnly
+# NOTE: G1GC is the default GC for all JDKs 11 and newer
11-:-XX:+UseG1GC
+# See https://github.com/elastic/elasticsearch/pull/46169 for the history
+# behind these settings, but the tl;dr is that default values can lead
+# to situations where heap usage grows enough to trigger a circuit breaker
+# before GC kicks in.
11-:-XX:G1ReservePercent=25
11-:-XX:InitiatingHeapOccupancyPercent=30
diff --git a/libs/core/build.gradle b/libs/core/build.gradle
index 4850b5aea5c85..0cf2cd0bf92b6 100644
--- a/libs/core/build.gradle
+++ b/libs/core/build.gradle
@@ -36,45 +36,6 @@ base {
archivesName = 'opensearch-core'
}
-// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs
-if (!isEclipse) {
- sourceSets {
- java11 {
- java {
- srcDirs = ['src/main/java11']
- }
- }
- }
-
- configurations {
- java11Compile.extendsFrom(compile)
- }
-
- dependencies {
- java11Implementation sourceSets.main.output
- }
-
- compileJava11Java {
- sourceCompatibility = JavaVersion.VERSION_11
- targetCompatibility = JavaVersion.VERSION_11
- }
-
- forbiddenApisJava11 {
- if (BuildParams.runtimeJavaVersion < JavaVersion.VERSION_11) {
- targetCompatibility = JavaVersion.VERSION_11
- }
- replaceSignatureFiles 'jdk-signatures'
- }
-
- jar {
- metaInf {
- into 'versions/11'
- from sourceSets.java11.output
- }
- manifest.attributes('Multi-Release': 'true')
- }
-}
-
dependencies {
api project(':libs:opensearch-common')
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java
index decbf49f795c4..93600da510977 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java
@@ -21,6 +21,7 @@
class DefaultSpanScope implements SpanScope {
private final Span span;
private final SpanScope previousSpanScope;
+ private final Span beforeSpan;
private static final ThreadLocal<SpanScope> spanScopeThreadLocal = new ThreadLocal<>();
private final TracerContextStorage<String, Span> tracerContextStorage;
@@ -29,8 +30,14 @@ class DefaultSpanScope implements SpanScope {
* @param span span
* @param previousSpanScope before attached span scope.
*/
- private DefaultSpanScope(Span span, SpanScope previousSpanScope, TracerContextStorage tracerContextStorage) {
+ private DefaultSpanScope(
+ Span span,
+ final Span beforeSpan,
+ SpanScope previousSpanScope,
+ TracerContextStorage tracerContextStorage
+ ) {
this.span = Objects.requireNonNull(span);
+ this.beforeSpan = beforeSpan;
this.previousSpanScope = previousSpanScope;
this.tracerContextStorage = tracerContextStorage;
}
@@ -43,7 +50,8 @@ private DefaultSpanScope(Span span, SpanScope previousSpanScope, TracerContextSt
*/
public static SpanScope create(Span span, TracerContextStorage tracerContextStorage) {
final SpanScope beforeSpanScope = spanScopeThreadLocal.get();
- SpanScope newSpanScope = new DefaultSpanScope(span, beforeSpanScope, tracerContextStorage);
+ final Span beforeSpan = tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN);
+ SpanScope newSpanScope = new DefaultSpanScope(span, beforeSpan, beforeSpanScope, tracerContextStorage);
return newSpanScope;
}
@@ -61,8 +69,8 @@ public SpanScope attach() {
private void detach() {
spanScopeThreadLocal.set(previousSpanScope);
- if (previousSpanScope != null) {
- tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, previousSpanScope.getSpan());
+ if (beforeSpan != null) {
+ tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, beforeSpan);
} else {
tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, null);
}
diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle
index e126cf37e33a2..45b2ff74a5826 100644
--- a/modules/ingest-geoip/build.gradle
+++ b/modules/ingest-geoip/build.gradle
@@ -39,7 +39,7 @@ opensearchplugin {
}
dependencies {
- api('com.maxmind.geoip2:geoip2:4.1.0')
+ api('com.maxmind.geoip2:geoip2:4.2.0')
// geoip2 dependencies:
api('com.maxmind.db:maxmind-db:3.0.0')
api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}")
diff --git a/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1
deleted file mode 100644
index 0d124299e4cfb..0000000000000
--- a/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b6b356cc91863409ba3475a148ee11a3a6d6aa4b
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1
new file mode 100644
index 0000000000000..b6bfeeb9da60b
--- /dev/null
+++ b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1
@@ -0,0 +1 @@
+78ff932dc13ac41dd1f0fd9e7405a7f4ad815ce0
\ No newline at end of file
diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java
index 434a117d9b47e..6b33ac3b6be08 100644
--- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java
+++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java
@@ -65,6 +65,7 @@ public ScriptEngine getScriptEngine(Settings settings, Collection> getActions() {
return Arrays.asList(
new ActionHandler<>(SearchTemplateAction.INSTANCE, TransportSearchTemplateAction.class),
+ new ActionHandler<>(RenderSearchTemplateAction.INSTANCE, TransportRenderSearchTemplateAction.class),
new ActionHandler<>(MultiSearchTemplateAction.INSTANCE, TransportMultiSearchTemplateAction.class)
);
}
diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RenderSearchTemplateAction.java
new file mode 100644
index 0000000000000..1feb916c4ce73
--- /dev/null
+++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RenderSearchTemplateAction.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.script.mustache;
+
+import org.opensearch.action.ActionType;
+
+public class RenderSearchTemplateAction extends ActionType<SearchTemplateResponse> {
+
+ public static final RenderSearchTemplateAction INSTANCE = new RenderSearchTemplateAction();
+ public static final String NAME = "indices:data/read/search/template/render";
+
+ private RenderSearchTemplateAction() {
+ super(NAME, SearchTemplateResponse::new);
+ }
+}
diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java
index 7a94fc45837d9..9ffa2c94cb56f 100644
--- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java
@@ -81,6 +81,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client
renderRequest.setScript(id);
}
- return channel -> client.execute(SearchTemplateAction.INSTANCE, renderRequest, new RestToXContentListener<>(channel));
+ return channel -> client.execute(RenderSearchTemplateAction.INSTANCE, renderRequest, new RestToXContentListener<>(channel));
}
}
diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java
index 1aabea30fc651..d02c5f1efa591 100644
--- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java
+++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java
@@ -259,16 +259,25 @@ public void writeTo(StreamOutput out) throws IOException {
@Override
public String[] indices() {
+ if (request == null) {
+ return new String[0];
+ }
return request.indices();
}
@Override
public IndicesOptions indicesOptions() {
+ if (request == null) {
+ return SearchRequest.DEFAULT_INDICES_OPTIONS;
+ }
return request.indicesOptions();
}
@Override
public IndicesRequest indices(String... indices) {
+ if (request == null) {
+ return new SearchRequest(new String[0]).indices(indices);
+ }
return request.indices(indices);
}
}
diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportRenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportRenderSearchTemplateAction.java
new file mode 100644
index 0000000000000..993d77ffaa75c
--- /dev/null
+++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportRenderSearchTemplateAction.java
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.script.mustache;
+
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.script.ScriptService;
+import org.opensearch.transport.TransportService;
+
+public class TransportRenderSearchTemplateAction extends TransportSearchTemplateAction {
+
+ @Inject
+ public TransportRenderSearchTemplateAction(
+ TransportService transportService,
+ ActionFilters actionFilters,
+ ScriptService scriptService,
+ NamedXContentRegistry xContentRegistry,
+ NodeClient client
+ ) {
+ super(RenderSearchTemplateAction.NAME, transportService, actionFilters, scriptService, xContentRegistry, client);
+ }
+}
diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java
index 6e8b9d059b583..d75cc0337b66c 100644
--- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java
@@ -61,9 +61,9 @@ public class TransportSearchTemplateAction extends HandledTransportAction listener) {
final SearchTemplateResponse response = new SearchTemplateResponse();
diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java
index 72443d1323b44..71ce616fd5d94 100644
--- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java
+++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java
@@ -32,6 +32,7 @@
package org.opensearch.script.mustache;
+import org.opensearch.action.search.SearchRequest;
import org.opensearch.core.common.io.stream.Writeable;
import org.opensearch.script.ScriptType;
import org.opensearch.search.RandomSearchRequestGenerator;
@@ -110,4 +111,19 @@ public static SearchTemplateRequest createRandomRequest() {
request.setRequest(RandomSearchRequestGenerator.randomSearchRequest(SearchSourceBuilder::searchSource));
return request;
}
+
+ public void testSimulatedSearchTemplateRequest() {
+ SearchTemplateRequest request = new SearchTemplateRequest();
+ request.setSimulate(true);
+
+ assertEquals(0, request.indices().length);
+ assertEquals(SearchRequest.DEFAULT_INDICES_OPTIONS, request.indicesOptions());
+ assertEquals(2, request.indices("index1", "index2").indices().length);
+
+ SearchTemplateRequest randomRequest = createRandomRequest();
+ int expectedIndicesLength = randomRequest.indices().length;
+ request.setSimulate(true);
+
+ assertEquals(expectedIndicesLength, randomRequest.indices().length);
+ }
}
diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle
index 16f2d2c5f23c6..c3d70e9c64968 100644
--- a/plugins/discovery-azure-classic/build.gradle
+++ b/plugins/discovery-azure-classic/build.gradle
@@ -53,7 +53,7 @@ dependencies {
api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
api "commons-codec:commons-codec:${versions.commonscodec}"
api "commons-lang:commons-lang:2.6"
- api "commons-io:commons-io:2.15.0"
+ api "commons-io:commons-io:2.15.1"
api 'javax.mail:mail:1.4.7'
api 'javax.inject:javax.inject:1'
api "com.sun.jersey:jersey-client:${versions.jersey}"
diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.15.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.15.0.jar.sha1
deleted file mode 100644
index 73709383fd130..0000000000000
--- a/plugins/discovery-azure-classic/licenses/commons-io-2.15.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5c3c2db10f6f797430a7f9c696b4d1273768c924
\ No newline at end of file
diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1
new file mode 100644
index 0000000000000..47c5d13812a36
--- /dev/null
+++ b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1
@@ -0,0 +1 @@
+f11560da189ab563a5c8e351941415430e9304ea
\ No newline at end of file
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java
index 51f0ad9526e55..a2e920761b655 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java
@@ -99,7 +99,7 @@ protected Ec2Client buildClient(
if (Strings.hasText(endpoint)) {
logger.debug("using explicit ec2 endpoint [{}]", endpoint);
- builder.endpointOverride(URI.create(endpoint));
+ builder.endpointOverride(URI.create(getFullEndpoint(endpoint)));
}
if (Strings.hasText(region)) {
@@ -110,6 +110,19 @@ protected Ec2Client buildClient(
return SocketAccess.doPrivileged(builder::build);
}
+ protected String getFullEndpoint(String endpoint) {
+ if (!Strings.hasText(endpoint)) {
+ return null;
+ }
+ if (endpoint.startsWith("http://") || endpoint.startsWith("https://")) {
+ return endpoint;
+ }
+
+ // if no scheme is provided, default to https
+ logger.debug("no scheme found in endpoint [{}], defaulting to https", endpoint);
+ return "https://" + endpoint;
+ }
+
static ProxyConfiguration buildProxyConfiguration(Logger logger, Ec2ClientSettings clientSettings) {
if (Strings.hasText(clientSettings.proxyHost)) {
try {
diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java
index 81310f7e2e3c3..3164abe456515 100644
--- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java
+++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java
@@ -202,4 +202,32 @@ public void testAWSConfigurationWithAwsSettings() {
assertTrue(clientOverrideConfiguration.retryPolicy().isPresent());
assertThat(clientOverrideConfiguration.retryPolicy().get().numRetries(), is(10));
}
+
+ public void testGetFullEndpointWithScheme() {
+ final Settings settings = Settings.builder().put("discovery.ec2.endpoint", "http://ec2.us-west-2.amazonaws.com").build();
+ Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings);
+
+ AwsEc2ServiceImpl awsEc2ServiceImpl = new AwsEc2ServiceImpl();
+
+ String endpoint = awsEc2ServiceImpl.getFullEndpoint(clientSettings.endpoint);
+ assertEquals("http://ec2.us-west-2.amazonaws.com", endpoint);
+
+ assertEquals("http://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("http://httpserver.example.com"));
+
+ assertEquals("https://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("https://httpserver.example.com"));
+ }
+
+ public void testGetFullEndpointWithoutScheme() {
+ final Settings settings = Settings.builder().put("discovery.ec2.endpoint", "ec2.us-west-2.amazonaws.com").build();
+ Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings);
+
+ AwsEc2ServiceImpl awsEc2ServiceImpl = new AwsEc2ServiceImpl();
+
+ String endpoint = awsEc2ServiceImpl.getFullEndpoint(clientSettings.endpoint);
+ assertEquals("https://ec2.us-west-2.amazonaws.com", endpoint);
+
+ assertEquals("https://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("httpserver.example.com"));
+
+ assertNull(awsEc2ServiceImpl.getFullEndpoint(""));
+ }
}
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index 57a2493053956..22db73ad86796 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -57,7 +57,7 @@ dependencies {
runtimeOnly "com.google.guava:guava:${versions.guava}"
// Other dependencies
api 'org.tukaani:xz:1.9'
- api 'commons-io:commons-io:2.15.0'
+ api 'commons-io:commons-io:2.15.1'
api "org.slf4j:slf4j-api:${versions.slf4j}"
// character set detection
diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1
deleted file mode 100644
index 73709383fd130..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5c3c2db10f6f797430a7f9c696b4d1273768c924
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1
new file mode 100644
index 0000000000000..47c5d13812a36
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1
@@ -0,0 +1 @@
+f11560da189ab563a5c8e351941415430e9304ea
\ No newline at end of file
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index ed1f54888a26f..7736390d58fe1 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -70,7 +70,7 @@ dependencies {
api 'com.google.code.gson:gson:2.10.1'
runtimeOnly "com.google.guava:guava:${versions.guava}"
api "commons-logging:commons-logging:${versions.commonslogging}"
- api 'commons-cli:commons-cli:1.5.0'
+ api 'commons-cli:commons-cli:1.6.0'
api "commons-codec:commons-codec:${versions.commonscodec}"
api 'commons-collections:commons-collections:3.2.2'
api "org.apache.commons:commons-compress:${versions.commonscompress}"
diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1
deleted file mode 100644
index 8f9e064eda2d0..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dc98be5d5390230684a092589d70ea76a147925c
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1
new file mode 100644
index 0000000000000..bb94eda6814ea
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1
@@ -0,0 +1 @@
+38166a23afb5bd5520f739b87b3be87f7f0fb96d
\ No newline at end of file
diff --git a/release-notes/opensearch.release-notes-1.3.14.md b/release-notes/opensearch.release-notes-1.3.14.md
new file mode 100644
index 0000000000000..319f5a79781c7
--- /dev/null
+++ b/release-notes/opensearch.release-notes-1.3.14.md
@@ -0,0 +1,18 @@
+## 2023-12-12 Version 1.3.14 Release Notes
+
+### Upgrades
+- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302))
+- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303))
+- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564))
+- Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294))
+- Bump `org.apache.zookeeper:zookeeper` from 3.8.0 to 3.8.3 ([#11476](https://github.com/opensearch-project/OpenSearch/pull/11476))
+- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
+- Bump `org.apache.avro:avro` from 1.10.2 to 1.11.3 ([#11502](https://github.com/opensearch-project/OpenSearch/pull/11502))
+- Bump `jetty` from 9.4.51.v20230217 to 9.4.52.v20230823 ([#11501](https://github.com/opensearch-project/OpenSearch/pull/11501))
+- Bump `io.projectreactor:reactor-core` from 3.4.23 to 3.4.34 and reactor-netty from 1.0.24 to 1.0.39 ([#11500](https://github.com/opensearch-project/OpenSearch/pull/11500))
+- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521))
+- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539))
+
+### Bug Fixes
+- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060))
+- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml
index 32ac11097d3dc..bac2898ccea1c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml
@@ -4,8 +4,8 @@
# number_of_shards for the target index.
- skip:
- version: " - 2.99.99"
- reason: "only available in 3.0+"
+ version: " - 2.4.99"
+ reason: "max_shard_size was introduced in 2.5.0"
features: allowed_warnings
- do:
diff --git a/server/build.gradle b/server/build.gradle
index bf0c7791aeb55..e36498bf1038b 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -57,45 +57,6 @@ sourceSets {
}
}
}
-// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs
-if (!isEclipse) {
- sourceSets {
- java11 {
- java {
- srcDirs = ['src/main/java11']
- }
- }
- }
-
- configurations {
- java11Implementation.extendsFrom(api)
- }
-
- dependencies {
- java11Implementation sourceSets.main.output
- }
-
- compileJava11Java {
- sourceCompatibility = JavaVersion.VERSION_11
- targetCompatibility = JavaVersion.VERSION_11
- }
-
- tasks.named('forbiddenApisJava11').configure {
- doFirst {
- if (BuildParams.runtimeJavaVersion < JavaVersion.VERSION_11) {
- targetCompatibility = JavaVersion.VERSION_11
- }
- }
- }
-
- jar {
- metaInf {
- into 'versions/11'
- from sourceSets.java11.output
- }
- manifest.attributes('Multi-Release': 'true')
- }
-}
dependencies {
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java
index a82fd8d845709..c4e8ccfc0ecec 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java
@@ -10,7 +10,10 @@
import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.opensearch.action.admin.indices.shrink.ResizeType;
+import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.collect.Tuple;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.index.Index;
import org.opensearch.index.IndexModule;
@@ -18,8 +21,15 @@
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.test.OpenSearchIntegTestCase;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE;
+import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE;
+import static org.hamcrest.Matchers.hasSize;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class SegmentReplicationClusterSettingIT extends OpenSearchIntegTestCase {
@@ -29,6 +39,9 @@ public class SegmentReplicationClusterSettingIT extends OpenSearchIntegTestCase
protected static final int SHARD_COUNT = 1;
protected static final int REPLICA_COUNT = 1;
+ protected static final String REPLICATION_MISMATCH_VALIDATION_ERROR =
+ "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];";
+
@Override
public Settings indexSettings() {
return Settings.builder()
@@ -44,14 +57,6 @@ protected boolean addMockInternalEngine() {
return false;
}
- @Override
- protected Settings nodeSettings(int nodeOrdinal) {
- return Settings.builder()
- .put(super.nodeSettings(nodeOrdinal))
- .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
- .build();
- }
-
public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Exception {
Settings settings = Settings.builder().put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build();
final String ANOTHER_INDEX = "test-index";
@@ -123,4 +128,125 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex
assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false);
}
+ public void testReplicationTypesOverrideNotAllowed_IndexAPI() {
+ // Generate mutually exclusive replication strategies at cluster and index level
+ List replicationStrategies = getRandomReplicationTypesAsList();
+ ReplicationType clusterLevelReplication = replicationStrategies.get(0);
+ ReplicationType indexLevelReplication = replicationStrategies.get(1);
+ Settings nodeSettings = Settings.builder()
+ .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
+ .build();
+ internalCluster().startClusterManagerOnlyNode(nodeSettings);
+ internalCluster().startDataOnlyNode(nodeSettings);
+ Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, indexLevelReplication).build();
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings));
+ assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage());
+ }
+
+ public void testReplicationTypesOverrideNotAllowed_WithTemplates() {
+ // Generate mutually exclusive replication strategies at cluster and index level
+ List replicationStrategies = getRandomReplicationTypesAsList();
+ ReplicationType clusterLevelReplication = replicationStrategies.get(0);
+ ReplicationType templateReplicationType = replicationStrategies.get(1);
+ Settings nodeSettings = Settings.builder()
+ .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
+ .build();
+ internalCluster().startClusterManagerOnlyNode(nodeSettings);
+ internalCluster().startDataOnlyNode(nodeSettings);
+ internalCluster().startDataOnlyNode(nodeSettings);
+ logger.info(
+ "--> Create index with template replication {} and cluster level replication {}",
+ templateReplicationType,
+ clusterLevelReplication
+ );
+ // Create index template
+ client().admin()
+ .indices()
+ .preparePutTemplate("template_1")
+ .setPatterns(Collections.singletonList("test-idx*"))
+ .setSettings(Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, templateReplicationType).build())
+ .setOrder(0)
+ .get();
+
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME));
+ assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage());
+ }
+
+ public void testReplicationTypesOverrideNotAllowed_WithResizeAction() {
+ // Generate mutually exclusive replication strategies at cluster and index level
+ List replicationStrategies = getRandomReplicationTypesAsList();
+ ReplicationType clusterLevelReplication = replicationStrategies.get(0);
+ ReplicationType indexLevelReplication = replicationStrategies.get(1);
+ Settings nodeSettings = Settings.builder()
+ .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
+ .build();
+ internalCluster().startClusterManagerOnlyNode(nodeSettings);
+ internalCluster().startDataOnlyNode(nodeSettings);
+ internalCluster().startDataOnlyNode(nodeSettings);
+ logger.info(
+ "--> Create index with index level replication {} and cluster level replication {}",
+ indexLevelReplication,
+ clusterLevelReplication
+ );
+
+ // Define resize action and target shard count.
+ List> resizeActionsList = new ArrayList<>();
+ final int initialShardCount = 2;
+ resizeActionsList.add(new Tuple<>(ResizeType.SPLIT, 2 * initialShardCount));
+ resizeActionsList.add(new Tuple<>(ResizeType.SHRINK, SHARD_COUNT));
+ resizeActionsList.add(new Tuple<>(ResizeType.CLONE, initialShardCount));
+
+ Tuple resizeActionTuple = resizeActionsList.get(random().nextInt(resizeActionsList.size()));
+ final String targetIndexName = resizeActionTuple.v1().name().toLowerCase(Locale.ROOT) + "-target";
+
+ logger.info("--> Performing resize action {} with shard count {}", resizeActionTuple.v1(), resizeActionTuple.v2());
+
+ Settings indexSettings = Settings.builder()
+ .put(indexSettings())
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, initialShardCount)
+ .put(SETTING_REPLICATION_TYPE, clusterLevelReplication)
+ .build();
+ createIndex(INDEX_NAME, indexSettings);
+
+ // Block writes
+ client().admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(Settings.builder().put("index.blocks.write", true)).get();
+ ensureGreen();
+
+ // Validate resize action fails
+ IllegalArgumentException exception = expectThrows(
+ IllegalArgumentException.class,
+ () -> client().admin()
+ .indices()
+ .prepareResizeIndex(INDEX_NAME, targetIndexName)
+ .setResizeType(resizeActionTuple.v1())
+ .setSettings(
+ Settings.builder()
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", resizeActionTuple.v2())
+ .putNull("index.blocks.write")
+ .put(SETTING_REPLICATION_TYPE, indexLevelReplication)
+ .build()
+ )
+ .get()
+ );
+ assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage());
+ }
+
+ /**
+ * Generate a list of ReplicationType with random ordering
+ *
+ * @return List of ReplicationType values
+ */
+ private List getRandomReplicationTypesAsList() {
+ List replicationStrategies = List.of(ReplicationType.SEGMENT, ReplicationType.DOCUMENT);
+ int randomReplicationIndex = random().nextInt(replicationStrategies.size());
+ ReplicationType clusterLevelReplication = replicationStrategies.get(randomReplicationIndex);
+ ReplicationType indexLevelReplication = replicationStrategies.get(1 - randomReplicationIndex);
+ return List.of(clusterLevelReplication, indexLevelReplication);
+ }
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java
index c2ce7e48f92d2..2c12c0abb202b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java
@@ -31,6 +31,7 @@
import java.util.concurrent.TimeUnit;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE;
+import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE;
import static org.opensearch.indices.replication.SegmentReplicationBaseIT.waitForSearchableDocs;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -47,6 +48,9 @@ public class SegmentReplicationSnapshotIT extends AbstractSnapshotIntegTestCase
private static final String REPOSITORY_NAME = "test-segrep-repo";
private static final String SNAPSHOT_NAME = "test-segrep-snapshot";
+ protected static final String REPLICATION_MISMATCH_VALIDATION_ERROR =
+ "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];";
+
public Settings segRepEnableIndexSettings() {
return getShardSettings().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build();
}
@@ -306,4 +310,63 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio
IndicesService indicesService = internalCluster().getInstance(IndicesService.class);
assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false);
}
+
+ /**
+ * 1. Create index in DOCUMENT replication type
+ * 2. Snapshot index
+ * 3. Add new set of nodes with `cluster.indices.replication.strategy` set to SEGMENT and `cluster.index.restrict.replication.type`
+ * set to true.
+     * 4. Perform restore on the new set of nodes and validate that the restore is rejected with a replication type
+     *    mismatch validation error, since the snapshot's `DOCUMENT` replication conflicts with the restricted cluster setting.
+ */
+ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception {
+ final int documentCount = scaledRandomIntBetween(1, 10);
+ String originalClusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+
+ // Starting two nodes with primary and replica shards respectively.
+ final String primaryNode = internalCluster().startDataOnlyNode();
+ prepareCreate(
+ INDEX_NAME,
+ Settings.builder()
+                // we want to override the cluster replication setting by passing an index replication setting
+ .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT)
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT)
+ ).get();
+ ensureYellowAndNoInitializingShards(INDEX_NAME);
+ final String replicaNode = internalCluster().startDataOnlyNode();
+ ensureGreen(INDEX_NAME);
+
+ for (int i = 0; i < documentCount; i++) {
+ client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get();
+ }
+
+ createSnapshot();
+
+ // Delete index
+ assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get());
+ assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME));
+
+ // Start new set of nodes with cluster level replication type setting and restrict replication type setting.
+ Settings settings = Settings.builder()
+ .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
+ .build();
+
+ // Start new cluster manager node
+ String newClusterManagerNode = internalCluster().startClusterManagerOnlyNode(settings);
+
+ // Remove older nodes
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(originalClusterManagerNode));
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
+
+ String newPrimaryNode = internalCluster().startDataOnlyNode(settings);
+ String newReplicaNode = internalCluster().startDataOnlyNode(settings);
+
+ // Perform snapshot restore
+ logger.info("--> Performing snapshot restore to target index");
+
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> restoreSnapshotWithSettings(null));
+ assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage());
+ }
}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java
index 55f75a142a53c..7e4911c10c50e 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java
@@ -44,7 +44,7 @@ public final class RemoteInfoAction extends ActionType {
public static final String NAME = "cluster:monitor/remote/info";
public static final RemoteInfoAction INSTANCE = new RemoteInfoAction();
- public RemoteInfoAction() {
+ private RemoteInfoAction() {
super(NAME, RemoteInfoResponse::new);
}
}
diff --git a/server/src/main/java/org/opensearch/action/main/MainAction.java b/server/src/main/java/org/opensearch/action/main/MainAction.java
index c5cbac824ec83..28a31a92d7f16 100644
--- a/server/src/main/java/org/opensearch/action/main/MainAction.java
+++ b/server/src/main/java/org/opensearch/action/main/MainAction.java
@@ -44,7 +44,7 @@ public class MainAction extends ActionType {
public static final String NAME = "cluster:monitor/main";
public static final MainAction INSTANCE = new MainAction();
- public MainAction() {
+ private MainAction() {
super(NAME, MainResponse::new);
}
}
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index bb1bf94f5e984..3384393d8feaf 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -907,6 +907,10 @@ static Settings aggregateIndexSettings(
);
}
+ List validationErrors = new ArrayList<>();
+ validateIndexReplicationTypeSettings(indexSettingsBuilder.build(), clusterSettings).ifPresent(validationErrors::add);
+ validateErrors(request.index(), validationErrors);
+
Settings indexSettings = indexSettingsBuilder.build();
/*
* We can not validate settings until we have applied templates, otherwise we do not know the actual settings
@@ -1246,7 +1250,11 @@ private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState
public void validateIndexSettings(String indexName, final Settings settings, final boolean forbidPrivateIndexSettings)
throws IndexCreationException {
List validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings, indexName);
+ validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add);
+ validateErrors(indexName, validationErrors);
+ }
+ private static void validateErrors(String indexName, List validationErrors) {
if (validationErrors.isEmpty() == false) {
ValidationException validationException = new ValidationException();
validationException.addValidationErrors(validationErrors);
@@ -1322,6 +1330,27 @@ private static List validateIndexCustomPath(Settings settings, @Nullable
return validationErrors;
}
+ /**
+     * Validates that {@code index.replication.type} matches the cluster level setting {@code cluster.indices.replication.strategy}
+ * when {@code cluster.index.restrict.replication.type} is set to true.
+ *
+ * @param requestSettings settings passed in during index create request
+ * @param clusterSettings cluster setting
+ */
+ private static Optional validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) {
+ if (clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING)
+ && requestSettings.hasValue(SETTING_REPLICATION_TYPE)
+ && requestSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey())
+ .equals(clusterSettings.get(CLUSTER_REPLICATION_TYPE_SETTING).name()) == false) {
+ return Optional.of(
+ "index setting [index.replication.type] is not allowed to be set as ["
+ + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey()
+ + "=true]"
+ );
+ }
+ return Optional.empty();
+ }
+
/**
* Validates the settings and mappings for shrinking an index.
*
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index ab0ea89f4734d..fa4b0f475edc5 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -701,7 +701,8 @@ public void apply(Settings value, Settings current, Settings previous) {
AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE,
CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE,
CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT,
- CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT
+ CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT,
+ IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING
)
)
);
diff --git a/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java b/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java
index 803d106a2f25e..c297022f5766d 100644
--- a/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java
+++ b/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java
@@ -45,6 +45,7 @@
final class IndexVersionValue extends VersionValue {
private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexVersionValue.class);
+ private static final long TRANSLOG_LOC_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Translog.Location.class);
private final Translog.Location translogLocation;
@@ -55,7 +56,7 @@ final class IndexVersionValue extends VersionValue {
@Override
public long ramBytesUsed() {
- return RAM_BYTES_USED + RamUsageEstimator.shallowSizeOf(translogLocation);
+ return RAM_BYTES_USED + (translogLocation == null ? 0L : TRANSLOG_LOC_RAM_BYTES_USED);
}
@Override
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index 7b57fabdf1486..9c1e902606cab 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -45,7 +45,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.NoSuchFileException;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -781,6 +780,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException
segmentMetadataMap.values().stream().map(metadata -> metadata.uploadedFilename).collect(Collectors.toSet())
);
}
+ Set deletedSegmentFiles = new HashSet<>();
for (String metadataFile : metadataFilesToBeDeleted) {
Map staleSegmentFilesMetadataMap = readMetadataFile(metadataFile).getMetadata();
Set staleSegmentRemoteFilenames = staleSegmentFilesMetadataMap.values()
@@ -788,31 +788,33 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException
.map(metadata -> metadata.uploadedFilename)
.collect(Collectors.toSet());
AtomicBoolean deletionSuccessful = new AtomicBoolean(true);
- List nonActiveDeletedSegmentFiles = new ArrayList<>();
- staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> {
- try {
- remoteDataDirectory.deleteFile(file);
- nonActiveDeletedSegmentFiles.add(file);
- if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) {
- segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file));
+ staleSegmentRemoteFilenames.stream()
+ .filter(file -> activeSegmentRemoteFilenames.contains(file) == false)
+ .filter(file -> deletedSegmentFiles.contains(file) == false)
+ .forEach(file -> {
+ try {
+ remoteDataDirectory.deleteFile(file);
+ deletedSegmentFiles.add(file);
+ if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) {
+ segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file));
+ }
+ } catch (NoSuchFileException e) {
+ logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile);
+ } catch (IOException e) {
+ deletionSuccessful.set(false);
+ logger.warn(
+ "Exception while deleting segment file {} corresponding to metadata file {}. Deletion will be re-tried",
+ file,
+ metadataFile
+ );
}
- } catch (NoSuchFileException e) {
- logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile);
- } catch (IOException e) {
- deletionSuccessful.set(false);
- logger.info(
- "Exception while deleting segment file {} corresponding to metadata file {}. Deletion will be re-tried",
- file,
- metadataFile
- );
- }
- });
- logger.debug("nonActiveDeletedSegmentFiles={}", nonActiveDeletedSegmentFiles);
+ });
if (deletionSuccessful.get()) {
logger.debug("Deleting stale metadata file {} from remote segment store", metadataFile);
remoteMetadataDirectory.deleteFile(metadataFile);
}
}
+ logger.debug("deletedSegmentFiles={}", deletedSegmentFiles);
}
public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) {
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index 3d1794f8d3197..5c3beaf8509bd 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -301,6 +301,19 @@ public class IndicesService extends AbstractLifecycleComponent
Property.Final
);
+ /**
+ * If enabled, this setting enforces that indexes will be created with a replication type matching the cluster setting
+ * defined in cluster.indices.replication.strategy by rejecting any request that specifies a replication type that
+ * does not match the cluster setting. If disabled, a user can choose a replication type on a per-index basis using
+ * the index.replication.type setting.
+ */
+ public static final Setting CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING = Setting.boolSetting(
+ "cluster.index.restrict.replication.type",
+ false,
+ Property.NodeScope,
+ Property.Final
+ );
+
/**
* The node's settings.
*/
diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java
index 2dabb825cd227..cc43f4e5d79fb 100644
--- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java
+++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java
@@ -35,7 +35,6 @@
import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
-import org.apache.lucene.util.ArrayUtil;
import org.opensearch.action.search.SearchShardTask;
import org.opensearch.action.search.SearchType;
import org.opensearch.common.Nullable;
@@ -87,6 +86,8 @@
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
+import static org.opensearch.search.aggregations.bucket.BucketUtils.suggestShardSideQueueSize;
+
/**
* This class encapsulates the state needed to execute a search. It holds a reference to the
* shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on
@@ -410,7 +411,7 @@ public boolean shouldUseConcurrentSearch() {
*/
public LocalBucketCountThresholds asLocalBucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) {
if (shouldUseConcurrentSearch()) {
- return new LocalBucketCountThresholds(0, ArrayUtil.MAX_ARRAY_LENGTH - 1);
+ return new LocalBucketCountThresholds(0, suggestShardSideQueueSize(bucketCountThresholds.getShardSize()));
} else {
return new LocalBucketCountThresholds(bucketCountThresholds.getShardMinDocCount(), bucketCountThresholds.getShardSize());
}
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index c4a782209421b..cea151748bfb6 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -71,6 +71,7 @@
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.query.QueryShardContext;
import org.opensearch.index.translog.Translog;
+import org.opensearch.indices.IndexCreationException;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.InvalidAliasNameException;
import org.opensearch.indices.InvalidIndexNameException;
@@ -137,6 +138,7 @@
import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
@@ -166,6 +168,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase {
private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey()
+ REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
+ final String REPLICATION_MISMATCH_VALIDATION_ERROR =
+ "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];";
+
@Before
public void setup() throws Exception {
super.setUp();
@@ -1239,6 +1244,105 @@ public void testIndexTemplateReplicationType() {
assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey()));
}
+ public void testClusterForceReplicationTypeInAggregateSettings() {
+ Settings settings = Settings.builder()
+ .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
+ .build();
+ ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ Settings nonMatchingReplicationIndexSettings = Settings.builder()
+ .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT)
+ .build();
+ request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test");
+ request.settings(nonMatchingReplicationIndexSettings);
+ IndexCreationException exception = expectThrows(
+ IndexCreationException.class,
+ () -> aggregateIndexSettings(
+ ClusterState.EMPTY_STATE,
+ request,
+ Settings.EMPTY,
+ null,
+ Settings.EMPTY,
+ IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
+ randomShardLimitService(),
+ Collections.emptySet(),
+ clusterSettings
+ )
+ );
+ assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getCause().getMessage());
+
+ Settings matchingReplicationIndexSettings = Settings.builder()
+ .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+ .build();
+ request.settings(matchingReplicationIndexSettings);
+ Settings aggregateIndexSettings = aggregateIndexSettings(
+ ClusterState.EMPTY_STATE,
+ request,
+ Settings.EMPTY,
+ null,
+ Settings.EMPTY,
+ IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
+ randomShardLimitService(),
+ Collections.emptySet(),
+ clusterSettings
+ );
+ assertEquals(ReplicationType.SEGMENT.toString(), aggregateIndexSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey()));
+ }
+
+ public void testClusterForceReplicationTypeInValidateIndexSettings() {
+ ClusterService clusterService = mock(ClusterService.class);
+ Metadata metadata = Metadata.builder()
+ .transientSettings(Settings.builder().put(Metadata.DEFAULT_REPLICA_COUNT_SETTING.getKey(), 1).build())
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .metadata(metadata)
+ .build();
+ ThreadPool threadPool = new TestThreadPool(getTestName());
+ // Enforce cluster level replication type setting
+ final Settings forceClusterSettingEnabled = Settings.builder()
+ .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
+ .build();
+ ClusterSettings clusterSettings = new ClusterSettings(forceClusterSettingEnabled, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ when(clusterService.getSettings()).thenReturn(forceClusterSettingEnabled);
+ when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
+ when(clusterService.state()).thenReturn(clusterState);
+
+ final MetadataCreateIndexService checkerService = new MetadataCreateIndexService(
+ forceClusterSettingEnabled,
+ clusterService,
+ null,
+ null,
+ null,
+ createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService),
+ new Environment(Settings.builder().put("path.home", "dummy").build(), null),
+ IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
+ threadPool,
+ null,
+ new SystemIndices(Collections.emptyMap()),
+ true,
+ new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings())
+ );
+ // Use DOCUMENT replication type setting for index creation
+ final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build();
+
+ IndexCreationException exception = expectThrows(
+ IndexCreationException.class,
+ () -> checkerService.validateIndexSettings("test", indexSettings, false)
+ );
+ assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getCause().getMessage());
+
+ // Cluster level replication type setting not enforced
+ final Settings forceClusterSettingDisabled = Settings.builder()
+ .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+ .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), false)
+ .build();
+ clusterSettings = new ClusterSettings(forceClusterSettingDisabled, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
+ checkerService.validateIndexSettings("test", indexSettings, false);
+ threadPool.shutdown();
+ }
+
public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() {
Settings settings = Settings.builder()
.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT)
diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
index dc2111fdcfc56..46be10ce62840 100644
--- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
@@ -1416,7 +1416,7 @@ public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException,
indexShard,
indexShard.getPendingPrimaryTerm() + 1,
globalCheckpoint,
- randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNo),
+ randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNoOfUpdatesOrDeletesBeforeRollback),
new ActionListener() {
@Override
public void onResponse(Releasable releasable) {
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 36cfd84ff960a..2c6c4afed69fd 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -51,6 +51,7 @@
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -122,6 +123,14 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase {
1,
"node-1"
);
+ private final String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+ 10,
+ 36,
+ 34,
+ 1,
+ 1,
+ "node-1"
+ );
@Before
public void setup() throws IOException {
@@ -979,6 +988,51 @@ public void testDeleteStaleCommitsActualDelete() throws Exception {
verify(remoteMetadataDirectory).deleteFile(metadataFilename3);
}
+ public void testDeleteStaleCommitsDeleteDedup() throws Exception {
+ Map> metadataFilenameContentMapping = new HashMap<>(populateMetadata());
+ metadataFilenameContentMapping.put(metadataFilename4, metadataFilenameContentMapping.get(metadataFilename3));
+
+ when(
+ remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+ RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+ Integer.MAX_VALUE
+ )
+ ).thenReturn(new ArrayList<>(List.of(metadataFilename, metadataFilename2, metadataFilename3, metadataFilename4)));
+
+ when(remoteMetadataDirectory.getBlobStream(metadataFilename4)).thenAnswer(
+ I -> createMetadataFileBytes(
+ metadataFilenameContentMapping.get(metadataFilename4),
+ indexShard.getLatestReplicationCheckpoint(),
+ segmentInfos
+ )
+ );
+
+ remoteSegmentStoreDirectory.init();
+
+        // populateMetadata() adds stubs to return 4 metadata files
+ // We are passing lastNMetadataFilesToKeep=2 here so that oldest 2 metadata files will be deleted
+ remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);
+
+ Set staleSegmentFiles = new HashSet<>();
+ for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) {
+ staleSegmentFiles.add(metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]);
+ }
+ for (String metadata : metadataFilenameContentMapping.get(metadataFilename4).values()) {
+ staleSegmentFiles.add(metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]);
+ }
+ staleSegmentFiles.forEach(file -> {
+ try {
+ // Even with the same files in 2 stale metadata files, delete should be called only once.
+ verify(remoteDataDirectory, times(1)).deleteFile(file);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+ verify(remoteMetadataDirectory).deleteFile(metadataFilename3);
+ verify(remoteMetadataDirectory).deleteFile(metadataFilename4);
+ }
+
public void testDeleteStaleCommitsActualDeleteIOException() throws Exception {
Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
remoteSegmentStoreDirectory.init();
diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
index 62dcf54e25578..ba1600e6eb651 100644
--- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
+++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
@@ -1401,12 +1401,22 @@ public void testCollapseQuerySearchResults() throws Exception {
assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
if (executor != null) {
- assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L));
- assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L));
- assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L));
- assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L));
- assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L));
- assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L));
+ long maxScore = query.getTimeBreakdown().get("max_score");
+ long minScore = query.getTimeBreakdown().get("min_score");
+ long avgScore = query.getTimeBreakdown().get("avg_score");
+ long maxScoreCount = query.getTimeBreakdown().get("max_score_count");
+ long minScoreCount = query.getTimeBreakdown().get("min_score_count");
+ long avgScoreCount = query.getTimeBreakdown().get("avg_score_count");
+ assertThat(maxScore, greaterThan(0L));
+ assertThat(minScore, greaterThan(0L));
+ assertThat(avgScore, greaterThan(0L));
+ assertThat(maxScore, greaterThanOrEqualTo(avgScore));
+ assertThat(avgScore, greaterThanOrEqualTo(minScore));
+ assertThat(maxScoreCount, greaterThan(0L));
+ assertThat(minScoreCount, greaterThan(0L));
+ assertThat(avgScoreCount, greaterThan(0L));
+ assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount));
+ assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount));
}
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
@@ -1436,12 +1446,22 @@ public void testCollapseQuerySearchResults() throws Exception {
assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
if (executor != null) {
- assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L));
- assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L));
- assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L));
- assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L));
- assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L));
- assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L));
+ long maxScore = query.getTimeBreakdown().get("max_score");
+ long minScore = query.getTimeBreakdown().get("min_score");
+ long avgScore = query.getTimeBreakdown().get("avg_score");
+ long maxScoreCount = query.getTimeBreakdown().get("max_score_count");
+ long minScoreCount = query.getTimeBreakdown().get("min_score_count");
+ long avgScoreCount = query.getTimeBreakdown().get("avg_score_count");
+ assertThat(maxScore, greaterThan(0L));
+ assertThat(minScore, greaterThan(0L));
+ assertThat(avgScore, greaterThan(0L));
+ assertThat(maxScore, greaterThanOrEqualTo(avgScore));
+ assertThat(avgScore, greaterThanOrEqualTo(minScore));
+ assertThat(maxScoreCount, greaterThan(0L));
+ assertThat(minScoreCount, greaterThan(0L));
+ assertThat(avgScoreCount, greaterThan(0L));
+ assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount));
+ assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount));
}
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java
index 3a98a67b53920..ee816aa5f596d 100644
--- a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java
+++ b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java
@@ -145,6 +145,87 @@ public void run() {
assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue()));
}
+ public void testNoThreadContextToPreserve() throws InterruptedException, ExecutionException, TimeoutException {
+ final Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue()));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue()));
+
+ final Span local1 = tracer.startSpan(SpanCreationContext.internal().name("test-local-1"));
+ try (SpanScope localScope = tracer.withSpanInScope(local1)) {
+ try (StoredContext ignored = threadContext.stashContext()) {
+ assertThat(local1.getParentSpan(), is(nullValue()));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local1));
+ }
+ }
+
+ final Span local2 = tracer.startSpan(SpanCreationContext.internal().name("test-local-2"));
+ try (SpanScope localScope = tracer.withSpanInScope(local2)) {
+ try (StoredContext ignored = threadContext.stashContext()) {
+ assertThat(local2.getParentSpan(), is(nullValue()));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local2));
+ }
+ }
+
+ final Span local3 = tracer.startSpan(SpanCreationContext.internal().name("test-local-3"));
+ try (SpanScope localScope = tracer.withSpanInScope(local3)) {
+ try (StoredContext ignored = threadContext.stashContext()) {
+ assertThat(local3.getParentSpan(), is(nullValue()));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local3));
+ }
+ }
+ }
+ };
+
+ executorService.submit(threadContext.preserveContext(r)).get(1, TimeUnit.SECONDS);
+
+ assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue()));
+ }
+
+ public void testPreservingContextThreadContextMultipleSpans() throws InterruptedException, ExecutionException, TimeoutException {
+ final Span span = tracer.startSpan(SpanCreationContext.internal().name("test"));
+
+ try (SpanScope scope = tracer.withSpanInScope(span)) {
+ final Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue())));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span));
+
+ final Span local1 = tracer.startSpan(SpanCreationContext.internal().name("test-local-1"));
+ try (SpanScope localScope = tracer.withSpanInScope(local1)) {
+ try (StoredContext ignored = threadContext.stashContext()) {
+ assertThat(local1.getParentSpan(), is(span));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local1));
+ }
+ }
+
+ final Span local2 = tracer.startSpan(SpanCreationContext.internal().name("test-local-2"));
+ try (SpanScope localScope = tracer.withSpanInScope(local2)) {
+ try (StoredContext ignored = threadContext.stashContext()) {
+ assertThat(local2.getParentSpan(), is(span));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local2));
+ }
+ }
+
+ final Span local3 = tracer.startSpan(SpanCreationContext.internal().name("test-local-3"));
+ try (SpanScope localScope = tracer.withSpanInScope(local3)) {
+ try (StoredContext ignored = threadContext.stashContext()) {
+ assertThat(local3.getParentSpan(), is(span));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local3));
+ }
+ }
+ }
+ };
+
+ executorService.submit(threadContext.preserveContext(r)).get(1, TimeUnit.SECONDS);
+ }
+
+ assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue())));
+ assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue()));
+ }
+
public void testPreservingContextAndStashingThreadContext() throws InterruptedException, ExecutionException, TimeoutException {
final Span span = tracer.startSpan(SpanCreationContext.internal().name("test"));
diff --git a/settings.gradle b/settings.gradle
index 139d45013710f..24ab4a7a22237 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -10,7 +10,7 @@
*/
plugins {
- id "com.gradle.enterprise" version "3.15.1"
+ id "com.gradle.enterprise" version "3.16.1"
}
ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE')
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 2b56a0fc3f993..7adf29792f27d 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -33,7 +33,7 @@ apply plugin: 'opensearch.java'
group = 'hdfs'
versions << [
- 'jetty': '9.4.52.v20230823'
+ 'jetty': '9.4.53.v20231009'
]
dependencies {
@@ -48,6 +48,9 @@ dependencies {
exclude group: "com.squareup.okhttp3"
exclude group: "org.xerial.snappy"
exclude module: "json-io"
+ exclude module: "logback-core"
+ exclude module: "logback-classic"
+ exclude module: "avro"
}
api "org.codehaus.jettison:jettison:${versions.jettison}"
api "org.apache.commons:commons-compress:${versions.commonscompress}"
@@ -67,10 +70,12 @@ dependencies {
api 'org.apache.zookeeper:zookeeper:3.9.1'
api "org.apache.commons:commons-text:1.11.0"
api "commons-net:commons-net:3.10.0"
+ api "ch.qos.logback:logback-core:1.2.13"
+ api "ch.qos.logback:logback-classic:1.2.13"
runtimeOnly "com.google.guava:guava:${versions.guava}"
runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") {
exclude group: "com.squareup.okio"
}
- runtimeOnly "com.squareup.okio:okio:3.6.0"
+ runtimeOnly "com.squareup.okio:okio:3.7.0"
runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5"
}
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 7614cd0e8f920..6215e84f42676 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -71,6 +71,7 @@
import org.opensearch.action.search.ClearScrollResponse;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.action.support.WriteRequest;
import org.opensearch.client.AdminClient;
import org.opensearch.client.Client;
import org.opensearch.client.ClusterAdminClient;
@@ -1646,6 +1647,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
for (List segmented : partition) {
BulkRequestBuilder bulkBuilder = client().prepareBulk();
for (IndexRequestBuilder indexRequestBuilder : segmented) {
+ indexRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE);
bulkBuilder.add(indexRequestBuilder);
}
BulkResponse actionGet = bulkBuilder.execute().actionGet();