diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 06b761b1df8bd..48d978bede420 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -43,6 +43,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) +- Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4fee25885e39..4531d7493190c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs ([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) +- [Workload Management] Add queryGroupId to Task ([#14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) +- Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) +- Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) +- Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) +- Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) ### Dependencies - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) @@ -14,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) ### Changed +- Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) - Updated `termsQuery` in `KeywordField` to set custom rewrite_override before executing query ### Deprecated @@ -21,6 +28,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908)) +- Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) ### Security diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 6892af1b17f97..0502280cb69ad 100644 ---
a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -158,7 +158,17 @@ private static List<DistributionProject> resolveArchiveProjects(File checkoutDir projects.addAll(asList("deb", "rpm")); if (bwcVersion.onOrAfter("7.0.0")) { // starting with 7.0 we bundle a jdk which means we have platform-specific archives - projects.addAll(asList("darwin-tar", "linux-tar", "windows-zip")); + projects.addAll( + asList( + "darwin-tar", + "darwin-arm64-tar", + "linux-tar", + "linux-arm64-tar", + "linux-ppc64le-tar", + "linux-s390x-tar", + "windows-zip" + ) + ); } else { // prior to 7.0 we published only a single zip and tar archives projects.addAll(asList("zip", "tar")); } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7d32ed3df7b76..eb67af909bccf 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -37,8 +37,8 @@ reactor_netty = 1.1.21 reactor = 3.5.19 # client dependencies -httpclient5 = 5.2.1 -httpcore5 = 5.2.2 +httpclient5 = 5.2.3 +httpcore5 = 5.2.5 httpclient = 4.5.14 httpcore = 4.4.16 httpasyncclient = 4.1.5 diff --git a/client/rest/build.gradle b/client/rest/build.gradle index f18df65dfddfa..93faf0024b51e 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -47,10 +47,15 @@ dependencies { api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" + api "org.apache.httpcomponents.core5:httpcore5-reactive:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.slf4j:slf4j-api:${versions.slf4j}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" @@ -93,22 +98,52 @@ testingConventions { } } -thirdPartyAudit.ignoreMissingClasses( - 'org.conscrypt.Conscrypt', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - //commons-logging optional dependencies - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - 'org.apache.log4j.Priority', - //commons-logging provided dependencies - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener' -) +thirdPartyAudit { + ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.context.ContextRegistry', + 'io.micrometer.context.ContextSnapshot', + 'io.micrometer.context.ContextSnapshot$Scope',
'io.micrometer.context.ContextSnapshotFactory', + 'io.micrometer.context.ContextSnapshotFactory$Builder', + 'io.micrometer.context.ThreadLocalAccessor', + 'io.micrometer.core.instrument.Clock', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tag', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.search.Search', + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + ignoreViolations( + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + ) +} tasks.withType(JavaCompile) { // Suppressing '[options] target value 8 is obsolete and will be removed in a future release' diff --git a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.2.3.jar.sha1 b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 deleted file mode 100644 index 94bc0fa49bdb0..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54ee1ed58fe8ac40be1083ea9873a6c734939ab9 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..bb40fe65854f6 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 @@ -0,0 +1 @@ +09425df4d1365cee86a8e031a036bdca4343da4b \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ab9241fc93d45 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 @@ -0,0 +1 @@ +f68949965075b957c12b4c1ef89fd4bab2a0fdb1 \ No newline at end of file diff 
--git a/client/rest/licenses/httpcore5-reactive-LICENSE.txt b/client/rest/licenses/httpcore5-reactive-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from +<https://publicsuffix.org/list/effective_tld_names.dat> +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4.
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore5-reactive-NOTICE.txt b/client/rest/licenses/httpcore5-reactive-NOTICE.txt new file mode 100644 index 0000000000000..fcf14beb5c1ec --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-NOTICE.txt @@ -0,0 +1,8 @@ + +Apache HttpComponents Core Reactive Extensions +Copyright 2005-2021 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + diff --git a/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 new file mode 100644 index 0000000000000..45a80e3f7e361 --- /dev/null +++ b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 @@ -0,0 +1 @@ +3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-LICENSE.txt b/client/rest/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e3c7e7c77495 --- /dev/null +++ b/client/rest/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,21 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-NOTICE.txt b/client/rest/licenses/reactive-streams-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/licenses/reactor-core-3.5.19.jar.sha1 b/client/rest/licenses/reactor-core-3.5.19.jar.sha1 new file mode 100644 index 0000000000000..04b59d2faae04 --- /dev/null +++ b/client/rest/licenses/reactor-core-3.5.19.jar.sha1 @@ -0,0 +1 @@ +1d49ce1d0df79f28d3927da5f4c46a895b94335f \ No newline at end of file diff --git a/client/rest/licenses/reactor-core-LICENSE.txt b/client/rest/licenses/reactor-core-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/client/rest/licenses/reactor-core-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/client/rest/licenses/reactor-core-NOTICE.txt b/client/rest/licenses/reactor-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 56e31a3742f35..d087c60927e3e 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -34,6 +34,8 @@ import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; import org.apache.hc.core5.concurrent.CancellableDependency; +import java.io.IOException; +import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; /** @@ -77,7 +79,7 @@ public synchronized boolean cancel() { } /** - * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, @@ -95,6 +97,31 @@ synchronized void runIfNotCancelled(Runnable runnable) { runnable.run(); } + /** + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different + * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and + * the subsequent attempt has not been started yet. + * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the + * {@link CancellationException}, otherwise we run the provided {@link Callable} which will reset the request and send the next attempt. + * Note that this method must be synchronized as well as the {@link #cancel()} method, to prevent a request from being cancelled + * when there is no future to cancel, which would make cancelling the request a no-op.
+ */ synchronized <T> T callIfNotCancelled(Callable<T> callable) throws IOException { + if (this.httpRequest.isCancelled()) { + throw newCancellationException(); + } + try { + return callable.call(); + } catch (final IOException ex) { + throw ex; + } catch (final Exception ex) { + throw new IOException(ex); + } + } + static CancellationException newCancellationException() { return new CancellationException("request was cancelled"); } diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java index b062d937ed630..cb92e33e49156 100644 --- a/client/rest/src/main/java/org/opensearch/client/Response.java +++ b/client/rest/src/main/java/org/opensearch/client/Response.java @@ -40,11 +40,8 @@ import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.message.StatusLine; -import java.util.ArrayList; import java.util.List; import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * Holds an opensearch response. It wraps the {@link HttpResponse} returned and associates it with @@ -116,79 +113,11 @@ public HttpEntity getEntity() { return response.getEntity(); } - /** - * Optimized regular expression to test if a string matches the RFC 1123 date - * format (with quotes and leading space). Start/end of line characters and - * atomic groups are used to prevent backtracking. - */ - private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space - // quoted RFC 1123 date format - "\"" + // opening quote - "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking - "\\d{2} " + // 2-digit day - "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking - "\\d{4} " + // 4-digit year - "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) - "GMT" + // GMT - "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line - - /** - * Length of RFC 1123 format (with quotes and leading space), used in - * matchWarningHeaderPatternByPrefix(String). - */ - private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1; - - /** - * Tests if a string matches the RFC 7234 specification for warning headers. - * This assumes that the warn code is always 299 and the warn agent is always - * OpenSearch. - * - * @param s the value of a warning header formatted according to RFC 7234 - * @return {@code true} if the input string matches the specification - */ - private static boolean matchWarningHeaderPatternByPrefix(final String s) { - return s.startsWith("299 OpenSearch-"); - } - - /** - * Refer to org.opensearch.common.logging.DeprecationLogger - */ - private static String extractWarningValueFromWarningHeader(final String s) { - String warningHeader = s; - - /* - * The following block tests for the existence of a RFC 1123 date in the warning header. If the date exists, it is removed for - * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates).
- */ if (s.length() > WARNING_HEADER_DATE_LENGTH) { - final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); - final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); - - if (matcher.matches()) { - warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); - } - } - - final int firstQuote = warningHeader.indexOf('\"'); - final int lastQuote = warningHeader.length() - 1; - final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); - return warningValue; - } - /** * Returns a list of all warning headers returned in the response. */ public List<String> getWarnings() { - List<String> warnings = new ArrayList<>(); - for (Header header : response.getHeaders("Warning")) { - String warning = header.getValue(); - if (matchWarningHeaderPatternByPrefix(warning)) { - warnings.add(extractWarningValueFromWarningHeader(warning)); - } else { - warnings.add(warning); - } - } - return warnings; + return ResponseWarningsExtractor.getWarnings(response); } /** diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java new file mode 100644 index 0000000000000..441daff4f3af4 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpResponse; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +final class ResponseWarningsExtractor { + + /** + * Optimized regular expression to test if a string matches the RFC 1123 date + * format (with quotes and leading space). Start/end of line characters and + * atomic groups are used to prevent backtracking. + */ + private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space + // quoted RFC 1123 date format + "\"" + // opening quote + "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking + "\\d{2} " + // 2-digit day + "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking + "\\d{4} " + // 4-digit year + "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) + "GMT" + // GMT + "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line + + /** + * Length of RFC 1123 format (with quotes and leading space), used in + * matchWarningHeaderPatternByPrefix(String). + */ + private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1; + + private ResponseWarningsExtractor() {} + + /** + * Returns a list of all warning headers returned in the response.
+ * @param response HTTP response + */ + static List getWarnings(final HttpResponse response) { + List warnings = new ArrayList<>(); + for (Header header : response.getHeaders("Warning")) { + String warning = header.getValue(); + if (matchWarningHeaderPatternByPrefix(warning)) { + warnings.add(extractWarningValueFromWarningHeader(warning)); + } else { + warnings.add(warning); + } + } + return warnings; + } + + /** + * Tests if a string matches the RFC 7234 specification for warning headers. + * This assumes that the warn code is always 299 and the warn agent is always + * OpenSearch. + * + * @param s the value of a warning header formatted according to RFC 7234 + * @return {@code true} if the input string matches the specification + */ + private static boolean matchWarningHeaderPatternByPrefix(final String s) { + return s.startsWith("299 OpenSearch-"); + } + + /** + * Refer to org.opensearch.common.logging.DeprecationLogger + */ + private static String extractWarningValueFromWarningHeader(final String s) { + String warningHeader = s; + + /* + * The following block tests for the existence of a RFC 1123 date in the warning header. If the date exists, it is removed for + * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). + */ + if (s.length() > WARNING_HEADER_DATE_LENGTH) { + final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); + final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); + + if (matcher.matches()) { + warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); + } + } + + final int firstQuote = warningHeader.indexOf('\"'); + final int lastQuote = warningHeader.length() - 1; + final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); + return warningValue; + } + +}
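For context on the cryptic WARNING_HEADER_DATE_LENGTH sum above, a worked example; the header text and date below are illustrative, not taken from this change:

    // Shape of a warning header carrying a trailing warn-date (values illustrative):
    // 299 OpenSearch-2.x "this request accesses a deprecated field" "Mon, 29 Jul 2024 12:00:00 GMT"
    //
    // The quoted RFC 1123 date suffix, including its leading space, is always
    // 1+1+3+1+1+2+1+3+1+4+1+2+1+2+1+2+1+3+1 = 32 characters
    // (space, quote, "Mon", comma, space, "29", space, "Jul", space, "2024",
    // space, "12", colon, "00", colon, "00", space, "GMT", quote),
    // which is exactly the sum encoded by WARNING_HEADER_DATE_LENGTH. The extractor
    // strips that suffix by length when the date pattern matches, then returns the
    // text between the first quote and the last remaining quote.

diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 15905add76c4f..5c87e3fda5701 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -62,14 +62,19 @@ import org.apache.hc.core5.http.HttpEntity; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.Message; import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.nio.AsyncRequestProducer; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactive.ReactiveResponseConsumer; import org.apache.hc.core5.reactor.IOReactorStatus; import org.apache.hc.core5.util.Args; import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.http.ReactiveHttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; @@ -83,6 +88,7 @@ import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; @@ -98,6 +104,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import 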
java.util.concurrent.ExecutionException; @@ -106,6 +113,10 @@ import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; @@ -300,6 +311,23 @@ public boolean isRunning() { return client.getStatus() == IOReactorStatus.ACTIVE; } + /** + * Sends a streaming request to the OpenSearch cluster that the client points to and returns streaming response. This is an experimental API. + * @param request streaming request + * @return streaming response + * @throws IOException IOException + */ + public StreamingResponse streamRequest(StreamingRequest request) throws IOException { + final InternalStreamingRequest internalRequest = new InternalStreamingRequest(request); + + final StreamingResponse response = new StreamingResponse<>( + new RequestLine(internalRequest.httpRequest), + streamRequest(nextNodes(), internalRequest) + ); + + return response; + } + /** * Sends a request to the OpenSearch cluster that the client points to. * Blocks until the request is completed and returns its response or fails @@ -332,13 +360,13 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { - RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); ClassicHttpResponse httpResponse; try { - httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); + httpResponse = client.execute(context.requestProducer(), context.asyncResponseConsumer(), context.context(), null).get(); } catch (Exception e) { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, e); - onFailure(context.node); + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), e); + onFailure(context.node()); Exception cause = extractAndWrapCause(e); addSuppressedException(previousException, cause); if (nodeTuple.nodes.hasNext()) { @@ -352,7 +380,7 @@ private Response performRequest(final NodeTuple> nodeTuple, final } throw new IllegalStateException("unexpected exception type: must be either RuntimeException or IOException", cause); } - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node(), httpResponse); if (responseOrResponseException.responseException == null) { return responseOrResponseException.response; } @@ -363,6 +391,46 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } + private Publisher>> streamRequest( + final NodeTuple> nodeTuple, + final InternalStreamingRequest request + ) throws IOException { + return request.cancellable.callIfNotCancelled(() -> { + final Node node = nodeTuple.nodes.next(); + + final Mono>> publisher = Mono.create(emitter -> { + final RequestContext context = request.createContextForNextAttempt(node, nodeTuple.authCache, emitter); + final Future future = client.execute( + context.requestProducer(), + context.asyncResponseConsumer(), + context.context(), + null + 
); + + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } + }); + + return publisher.flatMap(message -> { + try { + final ResponseOrResponseException responseOrResponseException = convertResponse(request, node, message); + if (responseOrResponseException.responseException == null) { + return Mono.just(message); + } else { + if (nodeTuple.nodes.hasNext()) { + return Mono.from(streamRequest(nodeTuple, request)); + } else { + return Mono.error(responseOrResponseException.responseException); + } + } + } catch (final Exception ex) { + return Mono.error(ex); + } + }); + }); + } + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); @@ -393,6 +461,40 @@ private ResponseOrResponseException convertResponse(InternalRequest request, Nod throw responseException; } + private ResponseOrResponseException convertResponse( + InternalStreamingRequest request, + Node node, + Message> message + ) throws IOException { + + // Streaming Response could accumulate a lot of data so we may not be able to fully consume it. + final ClassicHttpResponse httpResponse = new BasicClassicHttpResponse( + message.getHead().getCode(), + message.getHead().getReasonPhrase() + ); + final Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); + + RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); + int statusCode = httpResponse.getCode(); + + if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { + onResponse(node); + if (request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { + throw new WarningFailureException(response); + } + return new ResponseOrResponseException(response); + } + ResponseException responseException = new ResponseException(response); + if (isRetryStatus(statusCode)) { + // mark host dead and retry against next one + onFailure(node); + return new ResponseOrResponseException(responseException); + } + // mark host alive and don't retry, as the error should be a request problem + onResponse(node); + throw responseException; + } + /** * Sends a request to the OpenSearch cluster that the client points to. 
* The request is executed asynchronously and the provided @@ -427,16 +529,23 @@ private void performRequestAsync( final FailureTrackingResponseListener listener ) { request.cancellable.runIfNotCancelled(() -> { - final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + final RequestContext context = request.createContextForNextAttempt( + nodeTuple.nodes.next(), + nodeTuple.authCache + ); Future future = client.execute( - context.requestProducer, - context.asyncResponseConsumer, - context.context, + context.requestProducer(), + context.asyncResponseConsumer(), + context.context(), new FutureCallback() { @Override public void completed(ClassicHttpResponse httpResponse) { try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + ResponseOrResponseException responseOrResponseException = convertResponse( + request, + context.node(), + httpResponse + ); if (responseOrResponseException.responseException == null) { listener.onSuccess(responseOrResponseException.response); } else { @@ -455,8 +564,8 @@ public void completed(ClassicHttpResponse httpResponse) { @Override public void failed(Exception failure) { try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), failure); + onFailure(context.node()); if (nodeTuple.nodes.hasNext()) { listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); @@ -822,6 +931,66 @@ public void remove() { } } + private class InternalStreamingRequest { + private final StreamingRequest request; + private final Set ignoreErrorCodes; + private final HttpUriRequestBase httpRequest; + private final Cancellable cancellable; + private final WarningsHandler warningsHandler; + + InternalStreamingRequest(StreamingRequest request) { + this.request = request; + Map params = new HashMap<>(request.getParameters()); + // ignore is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = params.remove("ignore"); + this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); + URI uri = buildUri(pathPrefix, request.getEndpoint(), params); + this.httpRequest = createHttpRequest(request.getMethod(), uri, null); + this.cancellable = Cancellable.fromRequest(httpRequest); + setHeaders(httpRequest, request.getOptions().getHeaders()); + setRequestConfig(httpRequest, request.getOptions().getRequestConfig()); + this.warningsHandler = request.getOptions().getWarningsHandler() == null + ? RestClient.this.warningsHandler + : request.getOptions().getWarningsHandler(); + } + + private void setHeaders(HttpRequest httpRequest, Collection
requestHeaders) { + // request headers override default headers, so we don't add default headers if they exist as request headers + final Set requestNames = new HashSet<>(requestHeaders.size()); + for (Header requestHeader : requestHeaders) { + httpRequest.addHeader(requestHeader); + requestNames.add(requestHeader.getName()); + } + for (Header defaultHeader : defaultHeaders) { + if (requestNames.contains(defaultHeader.getName()) == false) { + httpRequest.addHeader(defaultHeader); + } + } + if (compressionEnabled) { + httpRequest.addHeader("Accept-Encoding", "gzip"); + } + } + + private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) { + if (requestConfig != null) { + httpRequest.setConfig(requestConfig); + } + } + + public Publisher getPublisher() { + return request.getBody(); + } + + RequestContext createContextForNextAttempt( + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.httpRequest.reset(); + return new ReactiveRequestContext(this, node, authCache, emitter); + } + } + private class InternalRequest { private final Request request; private final Set ignoreErrorCodes; @@ -868,12 +1037,22 @@ private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requ } } - RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { + RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { this.httpRequest.reset(); - return new RequestContext(this, node, authCache); + return new AsyncRequestContext(this, node, authCache); } } + private interface RequestContext { + Node node(); + + AsyncRequestProducer requestProducer(); + + AsyncResponseConsumer asyncResponseConsumer(); + + HttpClientContext context(); + } + /** * The Apache HttpClient 5 adds "Authorization" header even if the credentials for basic authentication are not provided * (effectively, username and password are 'null'). 
To work around that, wrapping the AuthCache around the current HttpClientContext @@ -934,13 +1113,73 @@ public void clear() { } - private static class RequestContext { + private static class ReactiveRequestContext implements RequestContext { + private final Node node; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; + private final HttpClientContext context; + + ReactiveRequestContext( + InternalStreamingRequest request, + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.node = node; + // we stream the request body if the entity allows for it + this.requestProducer = ReactiveHttpUriRequestProducer.create(request.httpRequest, node.getHost(), request.getPublisher()); + this.asyncResponseConsumer = new ReactiveResponseConsumer(new FutureCallback>>() { + @Override + public void failed(Exception ex) { + emitter.error(ex); + } + + @Override + public void completed(Message> result) { + if (result == null) { + emitter.success(); + } else { + emitter.success(result); + } + } + + @Override + public void cancelled() { + failed(new CancellationException("Future cancelled")); + } + }); + this.context = HttpClientContext.create(); + context.setAuthCache(new WrappingAuthCache(context, authCache)); + } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } + } + + private static class AsyncRequestContext implements RequestContext { private final Node node; private final AsyncRequestProducer requestProducer; private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; - RequestContext(InternalRequest request, Node node, AuthCache authCache) { + AsyncRequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); @@ -950,6 +1189,26 @@ private static class RequestContext { this.context = HttpClientContext.create(); context.setAuthCache(new WrappingAuthCache(context, authCache)); } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } } private static Set getIgnoreErrorCodes(String ignoreString, String requestMethod) {
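To make the intended call pattern of the new streaming API concrete, a minimal usage sketch follows. It is illustrative only: the host, endpoint, and NDJSON payload are assumptions rather than part of this change, and generic type parameters are written out for clarity.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    import org.apache.hc.core5.http.HttpHost;
    import org.opensearch.client.RestClient;
    import org.opensearch.client.StreamingRequest;
    import org.opensearch.client.StreamingResponse;

    import reactor.core.publisher.Flux;

    public class StreamRequestSketch {
        public static void main(String[] args) throws Exception {
            // Assumed: a cluster reachable on localhost:9200; the endpoint is illustrative.
            try (RestClient client = RestClient.builder(new HttpHost("http", "localhost", 9200)).build()) {
                StreamingRequest<ByteBuffer> request = new StreamingRequest<>(
                    "POST",
                    "/sample-index/_bulk/stream",
                    Flux.just(ByteBuffer.wrap("{\"index\":{}}\n{\"field\":\"value\"}\n".getBytes(StandardCharsets.UTF_8)))
                );
                StreamingResponse<ByteBuffer> response = client.streamRequest(request);
                // The body is a reactive Publisher; draining it here is only for the sake of the sketch.
                Flux.from(response.getBody()).blockLast();
            }
        }
    }

diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java new file mode 100644 index 0000000000000..e1767407b1353 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 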
+ */ + +package org.opensearch.client; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import org.reactivestreams.Publisher; + +import static java.util.Collections.unmodifiableMap; + +/** + * HTTP Streaming Request to OpenSearch. This is an experimental API. + */ +public class StreamingRequest { + private final String method; + private final String endpoint; + private final Map parameters = new HashMap<>(); + + private RequestOptions options = RequestOptions.DEFAULT; + private final Publisher publisher; + + /** + * Constructor + * @param method method + * @param endpoint endpoint + * @param publisher publisher + */ + public StreamingRequest(String method, String endpoint, Publisher publisher) { + this.method = method; + this.endpoint = endpoint; + this.publisher = publisher; + } + + /** + * Get endpoint + * @return endpoint + */ + public String getEndpoint() { + return endpoint; + } + + /** + * Get method + * @return method + */ + public String getMethod() { + return method; + } + + /** + * Get options + * @return options + */ + public RequestOptions getOptions() { + return options; + } + + /** + * Get parameters + * @return parameters + */ + public Map getParameters() { + if (options.getParameters().isEmpty()) { + return unmodifiableMap(parameters); + } else { + Map combinedParameters = new HashMap<>(parameters); + combinedParameters.putAll(options.getParameters()); + return unmodifiableMap(combinedParameters); + } + } + + /** + * Add a query string parameter. + * @param name the name of the url parameter. Must not be null. + * @param value the value of the url parameter. If {@code null} then + * the parameter is sent as {@code name} rather than {@code name=value} + * @throws IllegalArgumentException if a parameter with that name has + * already been set + */ + public void addParameter(String name, String value) { + Objects.requireNonNull(name, "url parameter name cannot be null"); + if (parameters.containsKey(name)) { + throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]"); + } else { + parameters.put(name, value); + } + } + + /** + * Add query parameters using the provided map of key value pairs. + * + * @param paramSource a map of key value pairs where the key is the url parameter. + * @throws IllegalArgumentException if a parameter with that name has already been set. + */ + public void addParameters(Map paramSource) { + paramSource.forEach(this::addParameter); + } + + /** + * Body publisher + * @return body publisher + */ + public Publisher getBody() { + return publisher; + } +}
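A quick illustration of the parameter semantics above, continuing the earlier sketch (the endpoint name is an assumption): addParameter rejects a duplicate name, while getParameters overlays options-level parameters on top, so those win on conflict.

    StreamingRequest<ByteBuffer> request = new StreamingRequest<>("GET", "/sample-index/_search/stream", Flux.empty());
    request.addParameter("pretty", "true");
    // A second call with the same name fails fast:
    // request.addParameter("pretty", "false"); // throws IllegalArgumentException: already been set
    Map<String, String> effective = request.getParameters(); // unmodifiable view, options parameters overriding

diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java new file mode 100644 index 0000000000000..87d404c115723 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 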
+ */ + +package org.opensearch.client; + +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.Message; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; + +import java.util.List; + +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * HTTP Streaming Response from OpenSearch. This is an experimental API. + */ +public class StreamingResponse { + private final RequestLine requestLine; + private final Mono>> publisher; + private volatile HttpHost host; + + /** + * Constructor + * @param requestLine request line + * @param publisher message publisher (response with a body) + */ + public StreamingResponse(RequestLine requestLine, Publisher>> publisher) { + this.requestLine = requestLine; + // We cache the publisher here so the body and/or HttpResponse can + // be consumed independently and more than once. + this.publisher = Mono.from(publisher).cache(); + } + + /** + * Set host + * @param host host + */ + public void setHost(HttpHost host) { + this.host = host; + } + + /** + * Get request line + * @return request line + */ + public RequestLine getRequestLine() { + return requestLine; + } + + /** + * Get host + * @return host + */ + public HttpHost getHost() { + return host; + } + + /** + * Get response body {@link Publisher} + * @return response body {@link Publisher} + */ + public Publisher getBody() { + return publisher.flatMapMany(m -> Flux.from(m.getBody())); + } + + /** + * Returns the status line of the current response + */ + public StatusLine getStatusLine() { + return new StatusLine( + publisher.map(Message::getHead) + .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse())) + .block() + ); + } + + /** + * Returns a list of all warning headers returned in the response. + */ + public List getWarnings() { + return ResponseWarningsExtractor.getWarnings( + publisher.map(Message::getHead) + .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse())) + .block() + ); + } +}
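Because the constructor caches the message publisher, the response head and the body can be consumed independently. Continuing the earlier sketch (decoding the chunks as UTF-8 is an assumption about the payload):

    StreamingResponse<ByteBuffer> response = client.streamRequest(request);
    // getStatusLine() blocks until the response head arrives
    int status = response.getStatusLine().getStatusCode();
    // The cached Mono still allows the body to be streamed afterwards, chunk by chunk
    String body = Flux.from(response.getBody())
        .map(buffer -> StandardCharsets.UTF_8.decode(buffer).toString())
        .reduce("", String::concat)
        .block();

diff --git a/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java new file mode 100644 index 0000000000000..63a71e29b8b31 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 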
+ */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.reactive.ReactiveEntityProducer; +import org.apache.hc.core5.util.Args; + +import java.nio.ByteBuffer; + +import org.reactivestreams.Publisher; + +/** + * The reactive producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class ReactiveHttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + ReactiveHttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @param publisher publisher + * @return new request producer + */ + public static ReactiveHttpUriRequestProducer create( + final HttpUriRequestBase request, + final HttpHost host, + Publisher publisher + ) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final Header contentTypeHeader = request.getFirstHeader("Content-Type"); + final ContentType contentType = (contentTypeHeader == null) + ? ContentType.APPLICATION_JSON + : ContentType.parse(contentTypeHeader.getValue()); + + final Header contentEncodingHeader = request.getFirstHeader("Content-Encoding"); + final String contentEncoding = (contentEncodingHeader == null) ? 
null : contentEncodingHeader.getValue(); + + final AsyncEntityProducer entityProducer = new ReactiveEntityProducer(publisher, -1, contentType, contentEncoding); + return new ReactiveHttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index dd51da3a30d8c..f4f1c57cdd588 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -56,12 +56,15 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; +import reactor.core.publisher.Mono; + import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -418,6 +421,16 @@ public void testIsRunning() { assertFalse(restClient.isRunning()); } + public void testStreamWithUnsupportedMethod() throws Exception { + try (RestClient restClient = createRestClient()) { + final UnsupportedOperationException ex = assertThrows( + UnsupportedOperationException.class, + () -> restClient.streamRequest(new StreamingRequest<>("unsupported", randomAsciiLettersOfLength(5), Mono.empty())) + ); + assertEquals("http method not supported: unsupported", ex.getMessage()); + } + } + private static void assertNodes(NodeTuple> nodeTuple, AtomicInteger lastNodeIndex, int runs) throws IOException { int distance = lastNodeIndex.get() % nodeTuple.nodes.size(); /* diff --git a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java new file mode 100644 index 0000000000000..2f33eb513c165 --- /dev/null +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java @@ -0,0 +1,40 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm; + +import java.security.BasicPermission; + +/** + * Permission to utilize methods in the ThreadContext class that are normally not accessible + * + * @see ThreadGroup + * @see SecureSM + */ +public final class ThreadContextPermission extends BasicPermission { + + /** + * Creates a new ThreadContextPermission object. + * + * @param name target name + */ + public ThreadContextPermission(String name) { + super(name); + } + + /** + * Creates a new ThreadContextPermission object. + * This constructor exists for use by the {@code Policy} object to instantiate new Permission objects. + * + * @param name target name + * @param actions ignored + */ + public ThreadContextPermission(String name, String actions) { + super(name, actions); + } +} diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index fb51a0bb7f157..7b828109139c8 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -46,6 +46,7 @@ ext { testClusters.all { module ':modules:mapper-extras' + module ':modules:aggs-matrix-stats' systemProperty 'opensearch.scripting.update.ctx_in_params', 'false' // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 systemProperty 'opensearch.transport.cname_in_publish_address', 'true' diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml new file mode 100644 index 0000000000000..87c260ce5f308 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml @@ -0,0 +1,1515 @@ +--- +setup: +- skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + +# -- NOT SUPPORTED: -- +# geobounds +# scripted metric +# -- NOT SUPPORTED: -- +# Any geo agg +# sig terms/text + +- do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + os: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_os: + type: keyword + script: "emit(params._source[\"os\"])" + derived_long: + type: long + script: "emit(params._source[\"long\"])" + derived_float: + type: float + script: "emit(params._source[\"float\"])" + derived_double: + type: double + script: "emit(params._source[\"double\"])" + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + derived_geo: + type: geo_point + script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + derived_ip: + type: ip + script: "emit(params._source[\"ip\"])" + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + derived_array_of_long: + type: 
long + script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + derived_object: + type: object + properties: + keyword: keyword + ip: ip + os: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + +- do: + bulk: + refresh: true + body: + - index: + _index: test + _id: 1 + - text: "peter piper" + keyword: "foo" + os: "mac" + long: 1 + float: 1.0 + double: 1.0 + date: "2017-01-01T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.1" + boolean: true + array_of_long: [ 1, 2 ] + json_field: "{\"text\":\"peter piper\",\"keyword\":\"foo\",\"os\":\"mac\",\"long\":1,\"float\":1.0,\"double\":1.0,\"date\":\"2017-01-01T00:00:00Z\",\"ip\":\"192.168.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}" + - index: + _index: test + _id: 2 + - text: "piper picked a peck" + keyword: "bar" + os: "windows" + long: 2 + float: 2.0 + double: 2.0 + date: "2017-01-02T00:00:00Z" + geo: [ -118.2437, 34.0522 ] + ip: "10.0.0.1" + boolean: false + array_of_long: [ 2, 3 ] + json_field: "{\"keyword\":\"bar\",\"long\":2,\"float\":2.0,\"os\":\"windows\",\"double\":2.0,\"date\":\"2017-01-02T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":false, \"array_of_long\": [2, 3]}" + - index: + _index: test + _id: 3 + - text: "peck of pickled peppers" + keyword: "baz" + os: "mac" + long: -3 + float: -3.0 + double: -3.0 + date: "2017-01-03T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "172.16.0.1" + boolean: true + array_of_long: [ 3, 4 ] + json_field: "{\"keyword\":\"baz\",\"long\":-3,\"float\":-3.0,\"os\":\"mac\",\"double\":-3.0,\"date\":\"2017-01-03T00:00:00Z\",\"ip\":\"172.16.0.1\",\"boolean\":true, \"array_of_long\": [3, 4]}" + - index: + _index: test + _id: 4 + - text: "pickled peppers" + keyword: "qux" + os: "windows" + long: 4 + float: 4.0 + double: 4.0 + date: "2017-01-04T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.2" + boolean: false + array_of_long: [ 4, 5 ] + json_field: "{\"keyword\":\"qux\",\"long\":4,\"float\":4.0,\"os\":\"windows\",\"double\":4.0,\"date\":\"2017-01-04T00:00:00Z\",\"ip\":\"192.168.0.2\",\"boolean\":false, \"array_of_long\": [4, 5]}" + - index: + _index: test + _id: 5 + - text: "peppers" + keyword: "quux" + os: "mac" + long: 5 + float: 5.0 + double: 5.0 + date: "2017-01-05T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "10.0.0.2" + boolean: true + array_of_long: [ 5, 6 ] + json_field: "{\"keyword\":\"quux\",\"long\":5,\"float\":5.0,\"os\":\"mac\",\"double\":5.0,\"date\":\"2017-01-05T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":true, \"array_of_long\": [5, 6]}" + +- do: + indices.refresh: + index: [test] + +### BUCKET AGGS +--- +"Test terms aggregation on derived_keyword from search definition": +- do: + search: + index: test + body: + derived: + derived_keyword_search_definition: + type: keyword + script: "emit(params._source[\"keyword\"])" + size: 0 + aggs: + keywords: + terms: + field: derived_keyword_search_definition + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- 
match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test terms aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + 
true_values: + term: + derived_boolean: true + false_values: + term: + derived_boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_long: + gte: 3 + low_num: + range: + derived_long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +### METRIC AGGS + +--- +"Test stats aggregation on derived_array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.double_percentiles.values: 3} + +--- +"Test percentile ranks aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.long_percentile_ranks.values: 2} + +--- +"Test top hits aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_long, derived_float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_float" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_long" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +## Pipeline agg +--- +"Test simple 
pipeline agg with derived_keyword and long": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + aggs: + sum_derived_longs: + sum: + field: derived_long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test terms aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +--- +"Test composite agg": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_os + - keyword: + terms: + field: derived_keyword + - is_true: + terms: + field: derived_boolean + aggs: + avg_long: + avg: + field: derived_long +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { 
aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation": +- do: + search: + index: test + body: + query: + term: + derived_keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test missing aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_agg: + missing: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_agg.doc_count: 0 } + +--- +"Test value_count 
aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test weighted_avg aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + weighted_avg_agg: + weighted_avg: + value: + field: derived_long + weight: + field: derived_float + +- match: { hits.total.value: 5 } +- is_true: aggregations.weighted_avg_agg.value + +--- +"Test diversified_sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + diversified_sampler_agg: + diversified_sampler: + field: derived_keyword + max_docs_per_value: 1 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.diversified_sampler_agg.doc_count: 5 } +- match: { aggregations.diversified_sampler_agg.avg_long.value: 1.8 } + +--- +"Test sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + sampler_agg: + sampler: + shard_size: 2 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- is_true: aggregations.sampler_agg.doc_count +- is_true: aggregations.sampler_agg.avg_long.value + +--- +"Test multi_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_keyword + - field: derived_os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + +#### SAME TESTS WITH DERIVED_OBJECT +--- +"Test terms aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_object.long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_object.float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_object.float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_object.date + calendar_interval: day + +- match: { hits.total.value: 
5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_object.date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_object.boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + true_values: + term: + derived_object.boolean: true + false_values: + term: + derived_object.boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_object.long: + gte: 3 + low_num: + range: + derived_object.long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +--- +"Test stats aggregation on derived_object.array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_object.array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_object_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_object.double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_object.double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- length: { 
aggregations.double_percentiles.values: 3} + +--- +"Test percentile ranks aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_object.long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.long_percentile_ranks.values: 2} + +--- +"Test top hits aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_object.keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_object.long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_object.long, derived_object.float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_object.long" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_object.float" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_object.long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +--- +"Test simple pipeline agg derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + aggs: + sum_derived_longs: + sum: + field: derived_object.long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test composite agg on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_object.os + - keyword: + terms: + field: derived_object.keyword + - is_true: + terms: + field: derived_object.boolean + aggs: + avg_long: + avg: + field: derived_object.long +- length: { aggregations.test_composite_agg.buckets: 5 } +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { 
aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram on derived_object": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_object.date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_object.long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_object.long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_object.keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation on derived_object": +- do: + search: + index: test + body: + query: + term: + derived_object.keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_object.long + +- match: { hits.total.value: 1 } +- match: 
{ aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test value_count aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test multi_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_object.keyword + - field: derived_object.os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + + +### IP specific tests +--- +"Test terms aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_object.ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +### TEST UNSUPPORTED AGG TYPES +--- +"Test sig terms not supported": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_os: + significant_terms: + field: "derived_os" + min_doc_count: 1 + size: 10 + +--- +"Test significant text": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_words: + significant_text: + field: "derived_text" + size: 10 + min_doc_count: 1 + +--- +"Test scripted_metric aggregation": +- do: + catch: /A document doesn't have a value for a field/ + search: + index: test + body: + size: 0 + aggs: + scripted_metric_agg: + scripted_metric: + init_script: "state.arr = []" + map_script: "state.arr.add(doc.derived_long.value)" + combine_script: "return 0" + reduce_script: "return 0" + +--- +"Test geo_distance aggregation on derived_geo": +- do: + catch: /aggregation_execution_exception/ + search: + index: test + rest_total_hits_as_int: true + body: + size: 0 + aggs: + distance: + geo_distance: + field: derived_geo + 
origin: "35.7796, -78.6382" + ranges: + - to: 1000000 + - from: 1000000 + to: 5000000 + - from: 5000000 diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 1a94def3fdff1..089e57f062a9f 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -46,7 +46,7 @@ dependencies { api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + javaRestTestImplementation "io.projectreactor:reactor-test:${versions.reactor}" testImplementation project(":modules:transport-netty4") } @@ -80,6 +80,10 @@ javaRestTest { systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' } +testClusters.javaRestTest { + setting 'http.type', 'reactor-netty4' +} + thirdPartyAudit { ignoreMissingClasses( 'com.aayushatharva.brotli4j.Brotli4jLoader', diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java new file mode 100644 index 0000000000000..62834483b5e9b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Map; + +import static org.opensearch.core.rest.RestStatus.REQUEST_URI_TOO_LONG; +import static org.hamcrest.Matchers.equalTo; + +public class ReactorNetty4BadRequestIT extends OpenSearchRestTestCase { + + public void testBadRequest() throws IOException { + final Response response = client().performRequest(new Request("GET", "/_nodes/settings")); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("nodes"); + int maxMaxInitialLineLength = Integer.MIN_VALUE; + final Setting httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + final String key = httpMaxInitialLineLength.getKey().substring("http.".length()); + for (Map.Entry entry : map.entrySet()) { + @SuppressWarnings("unchecked") + final Map settings = (Map) ((Map) entry.getValue()).get("settings"); + final int maxInitialLineLength; + if (settings.containsKey("http")) { + @SuppressWarnings("unchecked") + final Map httpSettings = (Map) settings.get("http"); + if (httpSettings.containsKey(key)) { + maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String) httpSettings.get(key), key).bytesAsInt(); + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength); + } + + final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a'); + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request(randomFrom("GET", "POST", "PUT"), path)) + ); + // The reactor-netty implementation does not provide a hook to customize or intercept request decoder errors at the moment (see + // https://github.com/reactor/reactor-netty/issues/3327).
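+ // Until such a hook exists, the most this test can assert on is the 414 status code itself rather than the shape of the error body.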
+ assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(REQUEST_URI_TOO_LONG.getStatus())); + } + + public void testInvalidParameterValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + request.addParameter("pretty", "neither-true-nor-false"); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + final RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Content-Type", "\t"); + request.setOptions(options); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java new file mode 100644 index 0000000000000..663eb9ef6e946 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java @@ -0,0 +1,204 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.rest.RestStatus.NOT_FOUND; +import static org.opensearch.core.rest.RestStatus.OK; +import static org.hamcrest.Matchers.greaterThan; + +public class ReactorNetty4HeadBodyIsEmptyIT extends OpenSearchRestTestCase { + public void testHeadRoot() throws IOException { + headTestCase("/", emptyMap(), greaterThan(0)); + headTestCase("/", singletonMap("pretty", ""), greaterThan(0)); + headTestCase("/", singletonMap("pretty", "true"), greaterThan(0)); + } + + private void createTestDoc() throws IOException { + createTestDoc("test"); + } + + private void createTestDoc(final String indexName) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("test", "test"); + } + builder.endObject(); + Request request = new Request("PUT", "/" + indexName + "/_doc/" + "1"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + } + } + + public void testDocumentExists() throws IOException { + createTestDoc(); + headTestCase("/test/_doc/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_doc/1", singletonMap("pretty", "true"), greaterThan(0)); + headTestCase("/test/_doc/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testIndexExists() throws IOException { + createTestDoc(); + headTestCase("/test", emptyMap(), greaterThan(0)); + headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); + } + + public void testAliasExists() throws IOException { + createTestDoc(); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startArray("actions"); + { + builder.startObject(); + { + builder.startObject("add"); + { + builder.field("index", "test"); + builder.field("alias", "test_alias"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + Request request = new Request("POST", "/_aliases"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_alias/test_alias", emptyMap(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), greaterThan(0)); + } + } + + public void testAliasDoesNotExist() throws IOException { + createTestDoc(); + headTestCase("/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testTemplateExists() throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.array("index_patterns", "*"); + builder.startObject("settings"); + { + builder.field("number_of_replicas", 0); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/_template/template"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_template/template", emptyMap(), greaterThan(0)); + } + } + + public void testGetSourceAction() throws IOException { + createTestDoc(); + 
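// HEAD on an existing _source should return 200 with a positive Content-Length and no response body; a missing document yields 404. +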
headTestCase("/test/_source/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_source/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("mappings"); + { + builder.startObject("_source"); + { + builder.field("enabled", false); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/test-no-source"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + createTestDoc("test-no-source"); + headTestCase("/test-no-source/_source/1", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + } + + public void testException() throws IOException { + /* + * This will throw an index not found exception which will be sent on the channel; previously when handling HEAD requests that would + * throw an exception, the content was swallowed and a content length header of zero was returned. Instead of swallowing the content + * we now let it rise up to the upstream channel so that it can compute the content length that would be returned. This test case is + * a test for this situation. + */ + headTestCase("/index-not-found-exception", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + private void headTestCase(final String url, final Map params, final Matcher matcher) throws IOException { + headTestCase(url, params, OK.getStatus(), matcher); + } + + private void headTestCase( + final String url, + final Map params, + final int expectedStatusCode, + final Matcher matcher, + final String... expectedWarnings + ) throws IOException { + Request request = new Request("HEAD", url); + for (Map.Entry param : params.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setOptions(expectWarnings(expectedWarnings)); + Response response = client().performRequest(request); + assertEquals(expectedStatusCode, response.getStatusLine().getStatusCode()); + assertThat(Integer.valueOf(response.getHeader("Content-Length")), matcher); + assertNull("HEAD requests shouldn't have a response body but " + url + " did", response.getEntity()); + } + +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java new file mode 100644 index 0000000000000..c564e289e3f88 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; +import org.junit.After; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; +import reactor.test.scheduler.VirtualTimeScheduler; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testStreamingRequest() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"2\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"3\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"4\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"5\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingBadRequest() throws IOException { + final Stream stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n" + ); + + final 
StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "not-supported-policy"); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectErrorMatches( + ex -> ex instanceof ResponseException && ((ResponseException) ex).getResponse().getStatusLine().getStatusCode() == 400 + ) + .verify(Duration.ofSeconds(10)); + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(400)); + assertThat(streamingResponse.getWarnings(), empty()); + } + + public void testStreamingBadStream() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n", + "{ \"name\": \"josh\" }\n" + ); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"type\":\"illegal_argument_exception\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java new file mode 100644 index 0000000000000..a978af1b11db4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest; + +import org.apache.hc.core5.http.ConnectionClosedException; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.After; + +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.subscriber.TestSubscriber; + +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingStressIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-stress-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = adminClient().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testCloseClientStreamingRequest() throws Exception { + final AtomicInteger id = new AtomicInteger(0); + final Stream stream = Stream.generate( + () -> "{ \"index\": { \"_index\": \"test-stress-streaming\", \"_id\": \"" + + id.incrementAndGet() + + "\" } }\n" + + "{ \"name\": \"josh\" }\n" + ); + + final StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(Duration.ofMillis(500)).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + TestSubscriber subscriber = TestSubscriber.create(); + streamingResponse.getBody().subscribe(subscriber); + + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + // Wait for the subscriber to receive at least one chunk + assertBusy(() -> assertThat(subscriber.getReceivedOnNext(), not(empty()))); + + // Forcibly close the client + executor.schedule(() -> { + client().close(); + return null; + }, 2, TimeUnit.SECONDS); + + // Wait for the subscriber to terminate + subscriber.block(Duration.ofSeconds(10)); + assertThat( + subscriber.expectTerminalError(), + anyOf(instanceOf(InterruptedIOException.class), instanceOf(ConnectionClosedException.class)) + ); + } finally { + executor.shutdown(); + if (executor.awaitTermination(1, TimeUnit.SECONDS) == false) { + executor.shutdownNow(); + } + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java index 906bbfd072da8..7f4a8f6cdef02 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java +++
b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -44,6 +44,7 @@ import java.util.List; import java.util.Optional; +import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.channel.ChannelOption; import io.netty.channel.socket.nio.NioChannelOption; @@ -390,7 +391,9 @@ protected Publisher incomingRequest(HttpServerRequest request, HttpServerR response.chunkedTransfer(false); response.compression(true); r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); - return Mono.from(response.sendObject(r.content())); + + final ByteBuf content = r.content().copy(); + return Mono.from(response.sendObject(content)); }); } } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java index 7df0b3c0c35fe..3dae2d57cf6a6 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java @@ -55,9 +55,14 @@ public void addCloseListener(ActionListener listener) { @Override public void sendResponse(HttpResponse response, ActionListener listener) { - emitter.next(createResponse(response)); - listener.onResponse(null); - emitter.complete(); + try { + emitter.next(createResponse(response)); + listener.onResponse(null); + emitter.complete(); + } catch (final Exception ex) { + emitter.error(ex); + listener.onFailure(ex); + } } @Override diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java index 56dadea0477c5..1aa03aa9967e2 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java @@ -101,6 +101,8 @@ public void receiveChunk(HttpChunk message) { lastChunkReceived = true; producer.complete(); } + } catch (final Exception ex) { + producer.error(ex); } finally { message.close(); } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java index f34f54e561021..8ed6710c8a1e3 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java @@ -44,7 +44,7 @@ public void subscribe(Subscriber s) { } HttpChunk createChunk(HttpContent chunk, boolean last) { - return new ReactorNetty4HttpChunk(chunk.content().retain(), last); + return new ReactorNetty4HttpChunk(chunk.copy().content(), last); } StreamingHttpChannel httpChannel() { diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java 
b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java index 616edccdfc396..6aaccc500072b 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java @@ -21,7 +21,11 @@ class ReactorNetty4StreamingResponseProducer implements StreamingHttpContentSend private volatile FluxSink emitter; ReactorNetty4StreamingResponseProducer() { - this.sender = Flux.create(emitter -> this.emitter = emitter); + this.sender = Flux.create(emitter -> register(emitter)); + } + + private void register(FluxSink emitter) { + this.emitter = emitter; } @Override diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index f48ddc26d929b..496fda6bb717d 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -35,6 +35,7 @@ apply plugin: 'opensearch.test-with-dependencies' dependencies { testImplementation project(path: ':modules:transport-netty4') // for http + testImplementation project(path: ':plugins:transport-reactor-netty4') // for http testImplementation project(path: ':plugins:transport-nio') testImplementation project(path: ':plugins:identity-shiro') // for http } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java index 08974b902c418..6d8e80a0a63ea 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java @@ -38,6 +38,7 @@ import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.MockNioTransportPlugin; import org.opensearch.transport.nio.NioTransportPlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; import org.junit.BeforeClass; import java.util.Arrays; @@ -53,7 +54,7 @@ public abstract class HttpSmokeTestCase extends OpenSearchIntegTestCase { @BeforeClass public static void setUpTransport() { nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); - nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class)); + nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class)); clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); } @@ -71,6 +72,8 @@ private static String getTypeKey(Class clazz) { private static String getHttpTypeKey(Class clazz) { if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME; + } else if (clazz.equals(ReactorNetty4Plugin.class)) { + return ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME; } else { assert clazz.equals(Netty4ModulePlugin.class); return Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME; @@ -92,7 +95,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class); + return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class); } @Override diff --git 
a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java index 78398e10b9ce8..1a806b033eb8a 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java @@ -26,6 +26,8 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.NioTransportPlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.StringContains.containsString; @@ -42,7 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ShiroIdentityPlugin.class); + return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class, ShiroIdentityPlugin.class); } diff --git a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml index a08090100989a..4fabd038cf915 100644 --- a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml +++ b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml @@ -3,5 +3,8 @@ + + + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml index 9864bfbbb26e9..f4f8b3752bec8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml @@ -8,8 +8,8 @@ "Mappings and Supported queries": - skip: - version: " - 2.99.99" - reason: "fixed in 3.0.0" + version: " - 2.15.99" + reason: "fixed in 2.16.0" # Create index with constant_keyword field type - do: diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index eccc903dfac82..bcf23a37c0010 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -886,7 +886,7 @@ public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Ex assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); } - public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws Exception { + public void testBatchModeEnabledWithDisabledTimeoutAndClusterGreen() throws Exception { internalCluster().startClusterManagerOnlyNodes( 1, @@ -920,8 +920,8 @@ public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws .put("node.name", clusterManagerName) .put(clusterManagerDataPathSettings) .put(ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE.getKey(), 5) - .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms") - .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms") + .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1") + 
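// "-1" disables the batch allocator timeout (the previous "10ms" now falls below the enforced lower limit) +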
.put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1") .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true) .build() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index e40928f15e8a8..fdb12639c65be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -42,6 +42,7 @@ import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; @@ -90,6 +91,7 @@ import static org.opensearch.script.MockScriptPlugin.NAME; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -919,7 +921,7 @@ public void testSortMissingNumbers() throws Exception { client().prepareIndex("test") .setId("3") .setSource( - jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 2).endObject() + jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 3).endObject() ) .get(); @@ -964,6 +966,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing(randomBoolean() ? 1 : "1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // FLOAT logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1001,6 +1015,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("d_value").order(SortOrder.ASC).missing(randomBoolean() ? 
1.1 : "1.1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // UNSIGNED_LONG logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1037,6 +1063,24 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? 2 : "2")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with negative missing value"); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? -1 : "-1")); + assertFailures(searchRequestBuilder, RestStatus.BAD_REQUEST, containsString("Value [-1] is out of range for an unsigned long")); } public void testSortMissingNumbersMinMax() throws Exception { diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index dfecf4f462c4d..ed2943db94420 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -37,8 +37,8 @@ import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -50,7 +50,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { +public class SearchShardTask extends QueryGroupTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index d3c1043c50cce..2a1a961e7607b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -35,8 +35,8 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import 
java.util.function.Supplier; @@ -49,7 +49,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchTask extends CancellableTask implements SearchBackpressureTask { +public class SearchTask extends QueryGroupTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 7d3237d43cd5c..88bf7ebea8e52 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -101,6 +101,7 @@ import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.opensearch.wlm.QueryGroupTask; import java.util.ArrayList; import java.util.Arrays; @@ -442,6 +443,12 @@ private void executeRequest( ); searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + // At this point the QUERY_GROUP_ID header will be present in the ThreadContext, set either via the ActionFilter + // or via an HTTP header (the HTTP header will be deprecated once the ActionFilter is implemented) + if (task instanceof QueryGroupTask) { + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + PipelinedRequest searchRequest; ActionListener listener; try { diff --git a/server/src/main/java/org/opensearch/client/OriginSettingClient.java b/server/src/main/java/org/opensearch/client/OriginSettingClient.java index 1b0e08cc489c4..27d87227df7bc 100644 --- a/server/src/main/java/org/opensearch/client/OriginSettingClient.java +++ b/server/src/main/java/org/opensearch/client/OriginSettingClient.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -65,7 +66,11 @@ protected void ActionListener listener ) { final Supplier supplier = in().threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashWithOrigin(origin)) { + try ( + ThreadContext.StoredContext ignore = ThreadContextAccess.doPrivileged( + () -> in().threadPool().getThreadContext().stashWithOrigin(origin) + ) + ) { super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6c6049f04231b..509cd732357d6 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -416,6 +416,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import
org.opensearch.core.common.bytes.BytesReference; @@ -2148,7 +2149,9 @@ protected void ActionListener listener ) { ThreadContext threadContext = threadPool().getThreadContext(); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) { + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged(() -> threadContext.stashAndMergeHeaders(headers)) + ) { super.doExecute(action, request, listener); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 6158461c7d4e9..6242247f34a93 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -42,8 +42,10 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; @@ -245,6 +247,13 @@ public GroupShardsIterator searchShards( preference = Preference.PRIMARY.type(); } + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) + && IndexModule.DataLocalityType.PARTIAL.name() + .equals(indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())) + && (preference == null || preference.isEmpty())) { + preference = Preference.PRIMARY_FIRST.type(); + } + ShardIterator iterator = preferenceActiveShardIterator( shard, clusterState.nodes().getLocalNodeId(), diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b2443490dd973..212583d1fb14f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -154,6 +154,13 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + public static final Setting IGNORE_THROTTLE_FOR_REMOTE_RESTORE = Setting.boolSetting( + "cluster.routing.allocation.remote_primary.ignore_throttle", + true, + Property.Dynamic, + Property.NodeScope + ); + public static final Setting PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting( "cluster.routing.allocation.rebalance.primary.buffer", 0.10f, @@ -173,6 +180,8 @@ public class BalancedShardsAllocator implements ShardsAllocator { private volatile WeightFunction weightFunction; private volatile float threshold; + private volatile boolean ignoreThrottleInRestore; + public BalancedShardsAllocator(Settings settings) { this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @@ -182,6 +191,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings)); setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings)); setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings)); + setIgnoreThrottleInRestore(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.get(settings)); updateWeightFunction(); 
setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); @@ -195,6 +205,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + clusterSettings.addSettingsUpdateConsumer(IGNORE_THROTTLE_FOR_REMOTE_RESTORE, this::setIgnoreThrottleInRestore); } /** @@ -205,6 +216,10 @@ private void setMovePrimaryFirst(boolean movePrimaryFirst) { setShardMovementStrategy(this.shardMovementStrategy); } + private void setIgnoreThrottleInRestore(boolean ignoreThrottleInRestore) { + this.ignoreThrottleInRestore = ignoreThrottleInRestore; + } + /** * Sets the correct Shard movement strategy to use. * If users are still using deprecated setting `move_primary_first`, we want behavior to remain unchanged. @@ -282,7 +297,8 @@ public void allocate(RoutingAllocation allocation) { weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -304,7 +320,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -459,6 +476,7 @@ void updateRebalanceConstraint(String constraint, boolean add) { public static class ModelNode implements Iterable { private final Map indices = new HashMap<>(); private int numShards = 0; + private int numPrimaryShards = 0; private final RoutingNode routingNode; ModelNode(RoutingNode routingNode) { @@ -492,7 +510,7 @@ public int numPrimaryShards(String idx) { } public int numPrimaryShards() { - return indices.values().stream().mapToInt(index -> index.numPrimaryShards()).sum(); + return numPrimaryShards; } public int highestPrimary(String index) { @@ -510,6 +528,10 @@ public void addShard(ShardRouting shard) { indices.put(index.getIndexId(), index); } index.addShard(shard); + if (shard.primary()) { + numPrimaryShards++; + } + numShards++; } @@ -521,6 +543,11 @@ public void removeShard(ShardRouting shard) { indices.remove(shard.getIndexName()); } } + + if (shard.primary()) { + numPrimaryShards--; + } + numShards--; } @@ -558,7 +585,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 00eb79add9f1d..7e4ae58548c55 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.IntroSorter; 
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.routing.RecoverySource;
 import org.opensearch.cluster.routing.RoutingNode;
 import org.opensearch.cluster.routing.RoutingNodes;
 import org.opensearch.cluster.routing.RoutingPool;
@@ -60,6 +61,8 @@ public class LocalShardsBalancer extends ShardsBalancer {
     private final boolean preferPrimaryBalance;
     private final boolean preferPrimaryRebalance;
+
+    private final boolean ignoreThrottleInRestore;
     private final BalancedShardsAllocator.WeightFunction weight;

     private final float threshold;
@@ -77,7 +80,8 @@ public LocalShardsBalancer(
         BalancedShardsAllocator.WeightFunction weight,
         float threshold,
         boolean preferPrimaryBalance,
-        boolean preferPrimaryRebalance
+        boolean preferPrimaryRebalance,
+        boolean ignoreThrottleInRestore
     ) {
         this.logger = logger;
         this.allocation = allocation;
@@ -94,6 +98,7 @@ public LocalShardsBalancer(
         this.preferPrimaryBalance = preferPrimaryBalance;
         this.preferPrimaryRebalance = preferPrimaryRebalance;
         this.shardMovementStrategy = shardMovementStrategy;
+        this.ignoreThrottleInRestore = ignoreThrottleInRestore;
     }

     /**
@@ -918,7 +923,15 @@ AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) {
                 nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0));
                 nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight));
             }
-            if (currentDecision.type() == Decision.Type.YES || currentDecision.type() == Decision.Type.THROTTLE) {
+
+            // For REMOTE_STORE recoveries, THROTTLE is as good as NO, since we want faster recoveries.
+            // The side effect of this is an increased number of relocations after these allocations.
+            boolean considerThrottleAsNo = ignoreThrottleInRestore
+                && shard.recoverySource().getType() == RecoverySource.Type.REMOTE_STORE
+                && shard.primary();
+
+            if (currentDecision.type() == Decision.Type.YES
+                || (currentDecision.type() == Decision.Type.THROTTLE && considerThrottleAsNo == false)) {
                 final boolean updateMinNode;
                 if (currentWeight == minWeight) {
                     /* we have an equal weight tie breaking:
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
index 6234427445754..b2548a8976c73 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
@@ -61,6 +61,7 @@
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
 import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.common.util.concurrent.ThreadContextAccess;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
 import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;
 import org.opensearch.telemetry.metrics.tags.Tags;
@@ -396,7 +397,7 @@ private void submitStateUpdateTask(
         final ThreadContext threadContext = threadPool.getThreadContext();
         final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(true);
         try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
-            threadContext.markAsSystemContext();
+            ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext);
             final UpdateTask updateTask = new UpdateTask(
                 config.priority(),
                 source,
diff --git
a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 4ab8255df7658..713de8cdd0fda 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -66,6 +66,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.Assertions; import org.opensearch.core.common.text.Text; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -1022,7 +1023,7 @@ public void submitStateUpdateTasks( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); List safeTasks = tasks.entrySet() .stream() diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 2f60c731bc554..a73e5d44b7e02 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -268,6 +268,7 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, + BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, BreakerSettings.CIRCUIT_BREAKER_TYPE, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 6e7d77d0c00d4..a4d60bc76127c 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -191,7 +191,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, - IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS, IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS, IndexModule.INDEX_RECOVERY_TYPE_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 906a27e9f398c..3e02a26aab488 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -45,11 +45,13 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.http.HttpTransportSettings; +import org.opensearch.secure_sm.ThreadContextPermission; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskThreadContextStatePropagator; 
import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.security.Permission; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -111,6 +113,12 @@ public final class ThreadContext implements Writeable { */ public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; + // thread context permissions + + private static final Permission ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("markAsSystemContext"); + private static final Permission STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashAndMergeHeaders"); + private static final Permission STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashWithOrigin"); + private static final Logger logger = LogManager.getLogger(ThreadContext.class); private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; @@ -207,6 +215,10 @@ public Writeable captureAsWriteable() { * if it can't find the task in memory. */ public StoredContext stashWithOrigin(String origin) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION); + } final ThreadContext.StoredContext storedContext = stashContext(); putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); return storedContext; @@ -218,6 +230,10 @@ public StoredContext stashWithOrigin(String origin) { * that are already existing are preserved unless they are defaults. */ public StoredContext stashAndMergeHeaders(Map headers) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION); + } final ThreadContextStruct context = threadLocal.get(); Map newHeader = new HashMap<>(headers); newHeader.putAll(context.requestHeaders); @@ -554,8 +570,19 @@ boolean isDefaultContext() { /** * Marks this thread context as an internal system context. This signals that actions in this context are issued * by the system itself rather than by a user action. + * + * Usage of markAsSystemContext is guarded by a ThreadContextPermission. In order to use + * markAsSystemContext, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; */ public void markAsSystemContext() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION); + } threadLocal.set(threadLocal.get().setSystemContext(propagators)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java new file mode 100644 index 0000000000000..14f8b8d79bf4d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.common.util.concurrent;
+
+import org.opensearch.SpecialPermission;
+import org.opensearch.common.annotation.InternalApi;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+/**
+ * This class wraps the {@link ThreadContext} operations requiring access in
+ * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
+ *
+ * @opensearch.internal
+ */
+@SuppressWarnings("removal")
+@InternalApi
+public final class ThreadContextAccess {
+
+    private ThreadContextAccess() {}
+
+    public static <T> T doPrivileged(PrivilegedAction<T> operation) {
+        SpecialPermission.check();
+        return AccessController.doPrivileged(operation);
+    }
+
+    public static void doPrivilegedVoid(Runnable action) {
+        SpecialPermission.check();
+        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+            action.run();
+            return null;
+        });
+    }
+}
diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java
index 673ed8dbaa1c3..6c6b1126a78d6 100644
--- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java
+++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java
@@ -73,13 +73,14 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator {
     private final long maxBatchSize;
     private static final short DEFAULT_SHARD_BATCH_SIZE = 2000;

-    private static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY =
+    public static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY =
         "cluster.routing.allocation.shards_batch_gateway_allocator.primary_allocator_timeout";
-    private static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY =
+    public static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY =
         "cluster.routing.allocation.shards_batch_gateway_allocator.replica_allocator_timeout";

     private TimeValue primaryShardsBatchGatewayAllocatorTimeout;
     private TimeValue replicaShardsBatchGatewayAllocatorTimeout;
+    public static final TimeValue MIN_ALLOCATOR_TIMEOUT = TimeValue.timeValueSeconds(20);

     /**
      * Number of shards we send in one batch to data nodes for fetching metadata
@@ -92,16 +93,50 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator {
         Setting.Property.NodeScope
     );

+    /**
+     * Timeout for existing primary shards batch allocator.
+     * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout
+     */
     public static final Setting<TimeValue> PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting(
         PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY,
         TimeValue.MINUS_ONE,
+        TimeValue.MINUS_ONE,
+        new Setting.Validator<>() {
+            @Override
+            public void validate(TimeValue timeValue) {
+                if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) {
+                    throw new IllegalArgumentException(
+                        "Setting ["
+                            + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey()
+                            + "] should be more than 20s or -1ms to disable timeout"
+                    );
+                }
+            }
+        },
         Setting.Property.NodeScope,
         Setting.Property.Dynamic
     );

+    /**
+     * Timeout for existing replica shards batch allocator.
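+     * A hedged example (illustrative value only): the setting is dynamic, so it can be applied at runtime via the
+     * cluster settings API, e.g. PUT _cluster/settings with
+     * { "persistent": { "cluster.routing.allocation.shards_batch_gateway_allocator.replica_allocator_timeout": "30s" } }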
+ * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ public static final Setting REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be more than 20s or -1ms to disable timeout" + ); + } + } + }, Setting.Property.NodeScope, Setting.Property.Dynamic ); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 93ff1b78b1ac5..eab070e1c6c10 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -97,7 +97,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -183,52 +182,7 @@ public final class IndexModule { Property.PrivateIndex ); - /** Which lucene file extensions to load with the mmap directory when using hybridfs store. This settings is ignored if {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} is set. - * This is an expert setting. - * @see Lucene File Extensions. - * - * @deprecated This setting will be removed in OpenSearch 3.x. Use {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} instead. - */ - @Deprecated - public static final Setting> INDEX_STORE_HYBRID_MMAP_EXTENSIONS = Setting.listSetting( - "index.store.hybrid.mmap.extensions", - List.of("nvd", "dvd", "tim", "tip", "dim", "kdd", "kdi", "cfs", "doc"), - Function.identity(), - new Setting.Validator>() { - - @Override - public void validate(final List value) {} - - @Override - public void validate(final List value, final Map, Object> settings) { - if (value.equals(INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY)) == false) { - final List nioExtensions = (List) settings.get(INDEX_STORE_HYBRID_NIO_EXTENSIONS); - final List defaultNioExtensions = INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY); - if (nioExtensions.equals(defaultNioExtensions) == false) { - throw new IllegalArgumentException( - "Settings " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " & " - + INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey() - + " cannot both be set. Use " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " only." - ); - } - } - } - - @Override - public Iterator> settings() { - return List.>of(INDEX_STORE_HYBRID_NIO_EXTENSIONS).iterator(); - } - }, - Property.IndexScope, - Property.NodeScope, - Property.Deprecated - ); - - /** Which lucene file extensions to load with nio. All others will default to mmap. Takes precedence over {@link #INDEX_STORE_HYBRID_MMAP_EXTENSIONS}. + /** Which lucene file extensions to load with nio. All others will default to mmap. * This is an expert setting. * @see Lucene File Extensions. 
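 * For example (illustrative values only), setting "index.store.hybrid.nio.extensions": ["dvd", "tim"] reads just those
 * extensions through nio while all remaining extensions default to mmap.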
*/ @@ -253,35 +207,6 @@ public Iterator> settings() { "vem" ), Function.identity(), - new Setting.Validator>() { - - @Override - public void validate(final List value) {} - - @Override - public void validate(final List value, final Map, Object> settings) { - if (value.equals(INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY)) == false) { - final List mmapExtensions = (List) settings.get(INDEX_STORE_HYBRID_MMAP_EXTENSIONS); - final List defaultMmapExtensions = INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY); - if (mmapExtensions.equals(defaultMmapExtensions) == false) { - throw new IllegalArgumentException( - "Settings " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " & " - + INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey() - + " cannot both be set. Use " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " only." - ); - } - } - } - - @Override - public Iterator> settings() { - return List.>of(INDEX_STORE_HYBRID_MMAP_EXTENSIONS).iterator(); - } - }, Property.IndexScope, Property.NodeScope ); diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index 3714561b63e44..9db5817450cd0 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -81,9 +81,13 @@ public Object missingObject(Object missingValue, boolean reversed) { return min ? Numbers.MIN_UNSIGNED_LONG_VALUE : Numbers.MAX_UNSIGNED_LONG_VALUE; } else { if (missingValue instanceof Number) { - return ((Number) missingValue); + return Numbers.toUnsignedLongExact((Number) missingValue); } else { - return new BigInteger(missingValue.toString()); + BigInteger missing = new BigInteger(missingValue.toString()); + if (missing.signum() < 0) { + throw new IllegalArgumentException("Value [" + missingValue + "] is out of range for an unsigned long"); + } + return missing; } } } diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java index f0200e72c3bc2..e230e37e6d826 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -9,38 +9,50 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.network.InetAddresses; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; import org.opensearch.common.unit.Fuzziness; import org.opensearch.geometry.Geometry; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; +import 
org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.query.DerivedFieldQuery; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.AggregationScript; import org.opensearch.script.DerivedFieldScript; import org.opensearch.script.Script; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.lookup.LeafSearchLookup; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; +import java.net.InetAddress; import java.time.ZoneId; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; /** * MappedFieldType for Derived Fields * Contains logic to execute different type of queries on a derived field of given type. + * * @opensearch.internal */ @@ -49,6 +61,11 @@ public class DerivedFieldType extends MappedFieldType implements GeoShapeQueryab final FieldMapper typeFieldMapper; final Function indexableFieldGenerator; + @Override + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { + return typeFieldMapper.mappedFieldType.docValueFormat(format, timeZone); + } + public DerivedFieldType( DerivedField derivedField, boolean isIndexed, @@ -134,6 +151,11 @@ public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLo ); } + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + return getFieldMapper().mappedFieldType.fielddataBuilder(fullyQualifiedIndexName, searchLookup); + } + @Override public Query termQuery(Object value, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); @@ -503,7 +525,7 @@ public Query existsQuery(QueryShardContext context) { @Override public boolean isAggregatable() { - return false; + return true; } private Query createConjuctionQuery(Query filterQuery, DerivedFieldQuery derivedFieldQuery) { @@ -529,4 +551,55 @@ public static DerivedFieldScript.LeafFactory getDerivedFieldLeafFactory( DerivedFieldScript.Factory factory = context.compile(script, DerivedFieldScript.CONTEXT); return factory.newFactory(script.getParams(), searchLookup); } + + public AggregationScript.LeafFactory getAggregationScript(QueryShardContext context) { + return new AggregationScript.LeafFactory() { + @Override + public AggregationScript newInstance(LeafReaderContext ctx) throws IOException { + final DerivedFieldValueFetcher derivedFieldValueFetcher = valueFetcher(context, context.lookup(), null); + derivedFieldValueFetcher.setNextReader(ctx); + final LeafSearchLookup leafSearchLookup = context.lookup().getLeafSearchLookup(ctx); + + return new AggregationScript(derivedField.getScript().getParams(), context.lookup(), ctx) { + @Override + public Object execute() { + return formatValues(derivedFieldValueFetcher.fetchValuesInternal(leafSearchLookup.source())); + } + + @Override + public void setDocument(int docid) { + super.setDocument(docid); + leafSearchLookup.source().setSegmentAndDocument(ctx, docid); + } + }; + } + + @Override + public boolean needs_score() { + return false; + } + }; + } + + // perform any formatting on the returned Object before passing to + // any values source. + private Object formatValues(List objects) { + // ips are returned as raw strings, format them as BytesRefs + // This ensures that ip_range aggs compare the bytesRef against ranges computed in the + // same way. 
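+    // For example, "10.0.0.1" is encoded by InetAddressPoint.encode into the same 16-byte IPv6-mapped form that the
+    // ip field type produces, so byte-wise comparisons against the range bounds line up.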
+ if (typeFieldMapper instanceof IpFieldMapper) { + return objects.stream().map(o -> (String) o).map(this::toBytesRef).collect(Collectors.toList()); + } + return objects; + } + + // format the ip string as BytesRef. + private BytesRef toBytesRef(String ip) { + if (ip == null) { + return null; + } + InetAddress address = InetAddresses.forString(ip); + byte[] bytes = InetAddressPoint.encode(address); + return new BytesRef(bytes); + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java index 7e5c9a3f3da93..3d0165f702fda 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java @@ -91,21 +91,22 @@ public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLo derivedField.getFormat() != null ? DateFormatter.forPattern(derivedField.getFormat()) : null ); - Function valueForDisplayUpdated = derivedField.getType().equals(DerivedFieldSupportedTypes.DATE.getName()) ? (o -> { + Function dateFormatter = derivedField.getType().equals(DerivedFieldSupportedTypes.DATE.getName()) ? (o -> { // this is needed to support date type for nested fields as they are required to be converted to long if (o instanceof String) { - return valueForDisplay.apply(((DateFieldMapper) typeFieldMapper).fieldType().parse((String) o)); + return ((DateFieldMapper) typeFieldMapper).fieldType().parse((String) o); } else { - return valueForDisplay.apply(o); + return o; } - }) : valueForDisplay; + }) : null; String subFieldName = name().substring(name().indexOf(".") + 1); return new ObjectDerivedFieldValueFetcher( subFieldName, getDerivedFieldLeafFactory(derivedField.getScript(), context, searchLookup == null ? context.lookup() : searchLookup), - valueForDisplayUpdated, - derivedField.getIgnoreMalformed() + valueForDisplay, + derivedField.getIgnoreMalformed(), + dateFormatter ); } @@ -115,6 +116,8 @@ static class ObjectDerivedFieldValueFetcher extends DerivedFieldValueFetcher { // TODO add it as part of index setting? private final boolean ignoreOnMalFormed; + private final Function dateFormatter; + ObjectDerivedFieldValueFetcher( String subField, DerivedFieldScript.LeafFactory derivedFieldScriptFactory, @@ -124,6 +127,20 @@ static class ObjectDerivedFieldValueFetcher extends DerivedFieldValueFetcher { super(derivedFieldScriptFactory, valueForDisplay); this.subField = subField; this.ignoreOnMalFormed = ignoreOnMalFormed; + this.dateFormatter = null; + } + + ObjectDerivedFieldValueFetcher( + String subField, + DerivedFieldScript.LeafFactory derivedFieldScriptFactory, + Function valueForDisplay, + boolean ignoreOnMalFormed, + Function dateFormatter + ) { + super(derivedFieldScriptFactory, valueForDisplay); + this.subField = subField; + this.ignoreOnMalFormed = ignoreOnMalFormed; + this.dateFormatter = dateFormatter; } @Override @@ -140,7 +157,7 @@ public List fetchValuesInternal(SourceLookup lookup) { if (nestedFieldObj instanceof List) { result.addAll((List) nestedFieldObj); } else { - result.add(nestedFieldObj); + result.add(dateFormatter != null ? 
dateFormatter.apply(nestedFieldObj) : nestedFieldObj); } } catch (OpenSearchParseException e) { if (!ignoreOnMalFormed) { diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index c6a1f5f27a875..fedf239871368 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -44,6 +44,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -98,7 +99,7 @@ public GlobalCheckpointSyncAction( public void updateGlobalCheckpointForShard(final ShardId shardId) { final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); execute(new Request(shardId), ActionListener.wrap(r -> {}, e -> { if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { logger.info(new ParameterizedMessage("{} global checkpoint sync failed", shardId), e); diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 5fa0a1a6459e7..e8ebf11ef0e5c 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -48,6 +48,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -122,7 +123,7 @@ final void backgroundSync(ShardId shardId, String primaryAllocationId, long prim final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final Request request = new Request(shardId, retentionLeases); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_background_sync", request); transportService.sendChildRequest( diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java index ca3c7e1d49700..9e8437ca78879 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java @@ -50,6 +50,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import 
org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -137,7 +138,7 @@ final void sync( final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final Request request = new Request(shardId, retentionLeases); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_sync", request); transportService.sendChildRequest( diff --git a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java index a46b641d1423f..c963f8aa95b8d 100644 --- a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java @@ -45,7 +45,6 @@ import org.apache.lucene.store.SimpleFSLockFactory; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -57,8 +56,6 @@ import java.nio.file.Path; import java.util.HashSet; import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Factory for a filesystem directory @@ -100,21 +97,7 @@ protected Directory newFSDirectory(Path location, LockFactory lockFactory, Index case HYBRIDFS: // Use Lucene defaults final FSDirectory primaryDirectory = FSDirectory.open(location, lockFactory); - final Set nioExtensions; - final Set mmapExtensions = Set.copyOf(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS)); - if (mmapExtensions.equals( - new HashSet(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY)) - ) == false) { - // If the mmap extension setting was defined, then compute nio extensions by subtracting out the - // mmap extensions from the set of all extensions. 
- nioExtensions = Stream.concat( - IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY).stream(), - IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY).stream() - ).filter(e -> mmapExtensions.contains(e) == false).collect(Collectors.toUnmodifiableSet()); - } else { - // Otherwise, get the list of nio extensions from the nio setting - nioExtensions = Set.copyOf(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS)); - } + final Set nioExtensions = new HashSet<>(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS)); if (primaryDirectory instanceof MMapDirectory) { MMapDirectory mMapDirectory = (MMapDirectory) primaryDirectory; return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions), nioExtensions); diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 8f39aa194b06c..d1e2884956f5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -24,6 +24,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexNotFoundException; @@ -113,7 +114,7 @@ final void publish(IndexShard indexShard, ReplicationCheckpoint checkpoint) { final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); final ReplicationTimer timer = new ReplicationTimer(); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 448cb3627651c..8684b1b383cab 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -263,6 +263,7 @@ import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; import org.opensearch.watcher.ResourceWatcherService; +import org.opensearch.wlm.WorkloadManagementTransportInterceptor; import javax.net.ssl.SNIHostName; @@ -1047,6 +1048,10 @@ protected Node( admissionControlService ); + WorkloadManagementTransportInterceptor workloadManagementTransportInterceptor = new WorkloadManagementTransportInterceptor( + threadPool + ); + final Collection secureSettingsFactories = pluginsService.filterPlugins(Plugin.class) .stream() .map(p -> p.getSecureSettingFactory(settings)) @@ -1054,7 +1059,10 @@ protected Node( .map(Optional::get) .collect(Collectors.toList()); - List transportInterceptors = List.of(admissionControlTransportInterceptor); + List transportInterceptors = List.of( + admissionControlTransportInterceptor, + 
workloadManagementTransportInterceptor + ); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class), diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index 0c173523fa7cd..7d0c1e2260de1 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -748,8 +748,9 @@ public void sendResponse(RestResponse response) { // over so we need to populate those **before** that, if possible. if (subscribed.get() == false) { prepareResponse(response.status(), Map.of("Content-Type", List.of(response.contentType()))); - Mono.ignoreElements(this).then(Mono.just(response)).subscribe(delegate::sendResponse); } + + Mono.ignoreElements(this).then(Mono.just(response)).subscribe(delegate::sendResponse); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java index ce6e32a7824c9..a38244fe9ff20 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java @@ -8,6 +8,7 @@ package org.opensearch.rest.action.document; +import com.google.protobuf.ExperimentalApi; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.bulk.BulkItemResponse; @@ -26,6 +27,7 @@ import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.http.HttpChunk; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; @@ -37,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.stream.Stream; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -57,6 +60,7 @@ * * @opensearch.api */ +@ExperimentalApi public class RestBulkStreamingAction extends BaseRestHandler { private static final BulkResponse EMPTY = new BulkResponse(new BulkItemResponse[0], 0L); private final boolean allowExplicitIndex; @@ -95,6 +99,18 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final StreamingRestChannelConsumer consumer = (channel) -> { final MediaType mediaType = request.getMediaType(); + // We prepare (and more importantly, validate) the templated BulkRequest instance: in case the parameters + // are incorrect, we are going to fail the request immediately, instead of producing a possibly large amount + // of failed chunks. 
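+            // For example (hypothetical input), an unparsable wait_for_active_shards value fails fast below with a
+            // single error response instead of repeating the same failure once per streamed chunk.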
+ FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + BulkRequest prepareBulkRequest = Requests.bulkRequest(); + if (waitForActiveShards != null) { + prepareBulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); + } + + prepareBulkRequest.timeout(timeout); + prepareBulkRequest.setRefreshPolicy(refresh); + // Set the content type and the status code before sending the response stream over channel.prepareResponse(RestStatus.OK, Map.of("Content-Type", List.of(mediaType.mediaTypeWithoutParameters()))); @@ -105,17 +121,17 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC // TODOs: // - add batching (by interval and/or count) // - eliminate serialization inefficiencies - Flux.from(channel).map(chunk -> { - FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + Flux.from(channel).zipWith(Flux.fromStream(Stream.generate(() -> { BulkRequest bulkRequest = Requests.bulkRequest(); - if (waitForActiveShards != null) { - bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); - } - - bulkRequest.timeout(timeout); - bulkRequest.setRefreshPolicy(refresh); - - try { + bulkRequest.waitForActiveShards(prepareBulkRequest.waitForActiveShards()); + bulkRequest.timeout(prepareBulkRequest.timeout()); + bulkRequest.setRefreshPolicy(prepareBulkRequest.getRefreshPolicy()); + return bulkRequest; + }))).map(t -> { + final HttpChunk chunk = t.getT1(); + final BulkRequest bulkRequest = t.getT2(); + + try (chunk) { bulkRequest.add( chunk.content(), defaultIndex, @@ -168,7 +184,17 @@ public void onFailure(Exception ex) { } catch (IOException ex) { throw new UncheckedIOException(ex); } - })).subscribe(); + })).onErrorComplete(ex -> { + if (ex instanceof Error) { + return false; + } + try { + channel.sendResponse(new BytesRestResponse(channel, (Exception) ex)); + return true; + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + }).subscribe(); }; return channel -> { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java index d006b15df327c..b6c8fe5d4802c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java @@ -36,6 +36,7 @@ import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexGeoPointFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.DerivedFieldType; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.query.QueryShardContext; @@ -183,6 +184,12 @@ private static ValuesSourceConfig internalResolve( valuesSourceType = defaultValueSourceType; } DocValueFormat docValueFormat = resolveFormat(format, valuesSourceType, timeZone, fieldType); + + // If we are aggregating on derived field set the agg script. 
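+        // The values source then executes the compiled script per document rather than reading doc values, so e.g. a
+        // terms aggregation over a derived keyword field can run without the field ever being indexed.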
+ if (fieldType instanceof DerivedFieldType) { + aggregationScript = ((DerivedFieldType) fieldType).getAggregationScript(context); + } + config = new ValuesSourceConfig( valuesSourceType, fieldContext, @@ -336,7 +343,7 @@ private ValuesSource ConstructValuesSource(Object missing, DocValueFormat format if (this.unmapped) { vs = valueSourceType().getEmpty(); } else { - if (fieldContext() == null) { + if (fieldContext() == null || fieldType() instanceof DerivedFieldType) { // Script case vs = valueSourceType().getScript(script(), scriptValueType()); } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java index 349bd8e14edf6..30f7494ea2d18 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support.values; import org.apache.lucene.search.Scorable; +import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.ScorerAware; import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.fielddata.SortedBinaryDocValues; @@ -61,7 +62,11 @@ private void set(int i, Object o) { values[i].clear(); } else { CollectionUtils.ensureNoSelfReferences(o, "ScriptBytesValues value"); - values[i].copyChars(o.toString()); + if (o instanceof BytesRef) { + values[i].copyBytes((BytesRef) o); + } else { + values[i].copyChars(o.toString()); + } } } diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java index 8a5f6dfffb036..8f0ee52ac3acd 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java @@ -40,6 +40,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; @@ -136,7 +137,7 @@ void collectNodes(ActionListener> listener) { new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we stash any context here since this is an internal execution and should not leak any existing context information - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final ClusterStateRequest request = new ClusterStateRequest(); request.clear(); diff --git a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java index 07ba96b135189..1d94228218fd0 100644 --- a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java @@ -47,6 +47,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import 
org.opensearch.common.util.concurrent.ThreadContextAccess;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.Strings;
@@ -349,7 +350,7 @@ private void collectRemoteNodes(Iterator<Supplier<DiscoveryNode>> seedNodes, Act
             try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
                 // we stash any context here since this is an internal execution and should not leak any
                 // existing context information.
-                threadContext.markAsSystemContext();
+                ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext);
                 transportService.sendRequest(
                     connection,
                     ClusterStateAction.NAME,
diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java
new file mode 100644
index 0000000000000..4eb413be61b72
--- /dev/null
+++ b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java
@@ -0,0 +1,76 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.wlm;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.tasks.CancellableTask;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Supplier;
+
+import static org.opensearch.search.SearchService.NO_TIMEOUT;
+
+/**
+ * Base class to define QueryGroup tasks
+ */
+public class QueryGroupTask extends CancellableTask {
+
+    private static final Logger logger = LogManager.getLogger(QueryGroupTask.class);
+    public static final String QUERY_GROUP_ID_HEADER = "queryGroupId";
+    public static final Supplier<String> DEFAULT_QUERY_GROUP_ID_SUPPLIER = () -> "DEFAULT_QUERY_GROUP";
+    private String queryGroupId;
+
+    public QueryGroupTask(long id, String type, String action, String description, TaskId parentTaskId, Map<String, String> headers) {
+        this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT);
+    }
+
+    public QueryGroupTask(
+        long id,
+        String type,
+        String action,
+        String description,
+        TaskId parentTaskId,
+        Map<String, String> headers,
+        TimeValue cancelAfterTimeInterval
+    ) {
+        super(id, type, action, description, parentTaskId, headers, cancelAfterTimeInterval);
+    }
+
+    /**
+     * This method should only be called after setQueryGroupId has been called at least once on this object
+     * @return task queryGroupId
+     */
+    public final String getQueryGroupId() {
+        if (queryGroupId == null) {
+            logger.warn("QueryGroup _id can't be null; it should be set before it is accessed. This is abnormal behaviour.");
+        }
+        return queryGroupId;
+    }
+
+    /**
+     * Sets the queryGroupId from the threadContext into the task itself.
+     * This method exists because the queryGroupId can only be evaluated after task creation.
+     * @param threadContext current threadContext
+     */
+    public final void setQueryGroupId(final ThreadContext threadContext) {
+        this.queryGroupId = Optional.ofNullable(threadContext)
+            .map(threadContext1 -> threadContext1.getHeader(QUERY_GROUP_ID_HEADER))
+            .orElse(DEFAULT_QUERY_GROUP_ID_SUPPLIER.get());
+    }
+
+    @Override
+    public boolean shouldCancelChildrenOnCancellation() {
+        return false;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java
new file mode 100644
index 0000000000000..848df8712549a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java
@@ -0,0 +1,64 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.wlm;
+
+import org.opensearch.tasks.Task;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportChannel;
+import org.opensearch.transport.TransportInterceptor;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportRequestHandler;
+
+/**
+ * This class is used to intercept search traffic requests and populate the queryGroupId header in task headers
+ */
+public class WorkloadManagementTransportInterceptor implements TransportInterceptor {
+    private final ThreadPool threadPool;
+
+    public WorkloadManagementTransportInterceptor(ThreadPool threadPool) {
+        this.threadPool = threadPool;
+    }
+
+    @Override
+    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
+        String action,
+        String executor,
+        boolean forceExecution,
+        TransportRequestHandler<T> actualHandler
+    ) {
+        return new RequestHandler<T>(threadPool, actualHandler);
+    }
+
+    /**
+     * This class is mainly used to populate the queryGroupId header
+     * @param <T> the search-related transport request type
+     */
+    public static class RequestHandler<T extends TransportRequest> implements TransportRequestHandler<T> {
+
+        private final ThreadPool threadPool;
+        TransportRequestHandler<T> actualHandler;
+
+        public RequestHandler(ThreadPool threadPool, TransportRequestHandler<T> actualHandler) {
+            this.threadPool = threadPool;
+            this.actualHandler = actualHandler;
+        }
+
+        @Override
+        public void messageReceived(T request, TransportChannel channel, Task task) throws Exception {
+            if (isSearchWorkloadRequest(task)) {
+                ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext());
+            }
+            actualHandler.messageReceived(request, channel, task);
+        }
+
+        boolean isSearchWorkloadRequest(Task task) {
+            return task instanceof QueryGroupTask;
+        }
+    }
+}
diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy
index 55e8db0d9c6a3..22e445f7d9022 100644
--- a/server/src/main/resources/org/opensearch/bootstrap/security.policy
+++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy
@@ -48,6 +48,8 @@ grant codeBase "${codebase.opensearch}" {
   permission java.lang.RuntimePermission "setContextClassLoader";
   // needed for SPI class loading
   permission java.lang.RuntimePermission "accessDeclaredMembers";
+  permission
org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; + permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; }; //// Very special jar permissions: diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index 0abfd7ef22ae7..19f8adbe003ca 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -157,4 +157,7 @@ grant { permission java.lang.RuntimePermission "reflectionFactoryAccess"; permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; + permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; + permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; }; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java index 36d984b7eb99b..562e293083633 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -225,7 +226,7 @@ public void testUpdateTemplates() { service.upgradesInProgress.set(additionsCount + deletionsCount + 2); // +2 to skip tryFinishUpgrade final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); service.upgradeTemplates(additions, deletions); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 4f3e50eebb9c6..ad8b48d56c417 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -41,9 +41,11 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -1054,6 +1056,68 @@ public void testSearchableSnapshotPrimaryDefault() throws Exception { } } + @SuppressForbidden(reason = "feature flag overrides") + public void testPartialIndexPrimaryDefault() throws Exception { + 
System.setProperty(FeatureFlags.TIERED_REMOTE_INDEX, "true"); + final int numIndices = 1; + final int numShards = 2; + final int numReplicas = 2; + final String[] indexNames = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indexNames[i] = "test" + i; + } + // The first index is a partial index + final String indexName = indexNames[0]; + ClusterService clusterService = null; + ThreadPool threadPool = null; + + try { + OperationRouting opRouting = new OperationRouting( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + ClusterState state = ClusterStateCreationUtils.stateWithAssignedPrimariesAndReplicas(indexNames, numShards, numReplicas); + threadPool = new TestThreadPool("testPartialIndexPrimaryDefault"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + + // Update the index config within the cluster state to modify the index to a partial index + IndexMetadata partialIndexMetadata = IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(state.metadata().index(indexName).getSettings()) + .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL) + .build() + ) + .build(); + Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()) + .put(partialIndexMetadata, false) + .generateClusterUuidIfNeeded(); + state = ClusterState.builder(state).metadata(metadataBuilder.build()).build(); + + // Verify default preference is primary only + GroupShardsIterator groupIterator = opRouting.searchShards(state, indexNames, null, null); + assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); + + for (ShardIterator shardIterator : groupIterator) { + assertTrue("Only primary should exist with no preference", shardIterator.nextOrNull().primary()); + } + + // Verify alternative preference can be applied to a partial index + groupIterator = opRouting.searchShards(state, indexNames, null, "_replica"); + assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); + + for (ShardIterator shardIterator : groupIterator) { + assertThat("Replica shards will be returned", shardIterator.size(), equalTo(numReplicas)); + assertFalse("Returned shard should be a replica", shardIterator.nextOrNull().primary()); + } + } finally { + IOUtils.close(clusterService); + terminate(threadPool); + System.setProperty(FeatureFlags.TIERED_REMOTE_INDEX, "false"); + } + } + private DiscoveryNode[] setupNodes() { // Sets up two data nodes in zone-a and one data node in zone-b List zones = Arrays.asList("a", "a", "b"); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java index d29249cef0818..11a43019f648e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.action.support.replication.ClusterStateCreationUtils; -import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.node.DiscoveryNode; @@ -50,7 +49,6 @@ import org.opensearch.cluster.routing.allocation.decider.Decision.Type; import 
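The key move in this test is flipping the index to `PARTIAL` data locality (roughly, the index's data is only partially held on local disk), which is what makes the default routing preference primary-only under the tiered remote index flag, per the assertions above. A minimal sketch of just that settings change, as used in the test:

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexModule;

// Marks an index as PARTIAL locality; OperationRouting then defaults
// searches on it to primary shards, as asserted in the test above.
Settings partialLocality = Settings.builder()
    .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL)
    .build();
```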
org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.snapshots.SnapshotShardSizeInfo; import java.util.Arrays; import java.util.Collections; @@ -398,19 +396,6 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca return Tuple.tuple(clusterState, rebalanceDecision); } - private RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) { - RoutingAllocation allocation = new RoutingAllocation( - deciders, - new RoutingNodes(state, false), - state, - ClusterInfo.EMPTY, - SnapshotShardSizeInfo.EMPTY, - System.nanoTime() - ); - allocation.debugDecision(true); - return allocation; - } - private void assertAssignedNodeRemainsSame( BalancedShardsAllocator allocator, RoutingAllocation routingAllocation, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java new file mode 100644 index 0000000000000..6df2ffc6149d5 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE; + +public class DecideAllocateUnassignedTests extends OpenSearchAllocationTestCase { + public void 
testAllocateUnassignedRemoteRestore_IgnoreThrottle() { + final String[] indices = { "idx1" }; + // Create a cluster state with 1 index with 1 started primary shard, and only + // one node initially so that all primary shards get allocated to the same node. + // + // When we add 1 more index with 1 started primary shard and 1 more node, if the new node throttles the recovery, + // the shard should get assigned on the older node if IgnoreThrottle is set to true + ClusterState clusterState = ClusterStateCreationUtils.state(1, indices, 1); + clusterState = addNodesToClusterState(clusterState, 1); + clusterState = addRestoringIndexToClusterState(clusterState, "idx2"); + List<AllocationDecider> allocationDeciders = getAllocationDecidersThrottleOnNode1(); + RoutingAllocation routingAllocation = newRoutingAllocation(new AllocationDeciders(allocationDeciders), clusterState); + // run allocation and verify where the restored primary is initializing + Settings build = Settings.builder().put(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.getKey(), true).build(); + BalancedShardsAllocator allocator = new BalancedShardsAllocator(build); + allocator.allocate(routingAllocation); + assertEquals(routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), "node_0"); + assertEquals(routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getIndexName(), "idx2"); + assertFalse(routingAllocation.routingNodes().hasUnassignedPrimaries()); + } + + public void testAllocateUnassignedRemoteRestore() { + final String[] indices = { "idx1" }; + // Create a cluster state with 1 index with 1 started primary shard, and only + // one node initially so that all primary shards get allocated to the same node. + // + // When we add 1 more index with 1 started primary shard and 1 more node, if the new node throttles the recovery, + // the shard should remain unassigned if IgnoreThrottle is set to false + ClusterState clusterState = ClusterStateCreationUtils.state(1, indices, 1); + clusterState = addNodesToClusterState(clusterState, 1); + clusterState = addRestoringIndexToClusterState(clusterState, "idx2"); + List<AllocationDecider> allocationDeciders = getAllocationDecidersThrottleOnNode1(); + RoutingAllocation routingAllocation = newRoutingAllocation(new AllocationDeciders(allocationDeciders), clusterState); + // run allocation and verify that the restored primary stays unassigned + Settings build = Settings.builder().put(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.getKey(), false).build(); + BalancedShardsAllocator allocator = new BalancedShardsAllocator(build); + allocator.allocate(routingAllocation); + assertEquals(routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), 0); + assertTrue(routingAllocation.routingNodes().hasUnassignedPrimaries()); + } + + private static List<AllocationDecider> getAllocationDecidersThrottleOnNode1() { + // Allocation deciders that throttle on `node_1` + final Set<String> throttleNodes = new HashSet<>(); + throttleNodes.add("node_1"); + AllocationDecider allocationDecider = new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (throttleNodes.contains(node.nodeId())) { + return Decision.THROTTLE; + } + return Decision.YES; + } + }; + List<AllocationDecider> allocationDeciders = Arrays.asList(allocationDecider); + return allocationDeciders; + } + + private ClusterState addNodesToClusterState(ClusterState clusterState, int nodeId) { + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); +
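Both tests toggle a single allocator setting; the sketch below isolates it, with the behavioral comments restating the two assertions above:

```java
import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.opensearch.common.settings.Settings;

// true  -> a THROTTLE decision on the target node no longer blocks the restored
//          primary; it is assigned to another eligible node (node_0 above)
// false -> the restored primary stays unassigned while recovery is throttled
Settings ignoreThrottle = Settings.builder()
    .put(BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE.getKey(), true)
    .build();
BalancedShardsAllocator allocator = new BalancedShardsAllocator(ignoreThrottle);
```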
DiscoveryNode discoveryNode = newNode("node_" + nodeId); + nodesBuilder.add(discoveryNode); + return ClusterState.builder(clusterState).nodes(nodesBuilder).build(); + } + + private ClusterState addRestoringIndexToClusterState(ClusterState clusterState, String index) { + final int primaryTerm = 1 + randomInt(200); + final ShardId shardId = new ShardId(index, "_na_", 0); + + IndexMetadata indexMetadata = IndexMetadata.builder(index) + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .primaryTerm(0, primaryTerm) + .build(); + + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, null); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRoutingRemoteRestore(index, shardId, null, null, true, ShardRoutingState.UNASSIGNED, unassignedInfo) + ); + final IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build(); + + IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexMetadata); + indexMetadataBuilder.putInSyncAllocationIds( + 0, + indexShardRoutingTable.activeShards() + .stream() + .map(ShardRouting::allocationId) + .map(AllocationId::getId) + .collect(Collectors.toSet()) + ); + ClusterState.Builder state = ClusterState.builder(clusterState); + state.metadata(Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder.build(), false).generateClusterUuidIfNeeded()); + state.routingTable( + RoutingTable.builder(clusterState.routingTable()) + .add(IndexRoutingTable.builder(indexMetadata.getIndex()).addIndexShard(indexShardRoutingTable)) + .build() + ); + return state.build(); + } + +} diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 4e66575711046..5992ffa1465b4 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -206,7 +206,7 @@ public void testStashWithOrigin() { } assertNull(threadContext.getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); - try (ThreadContext.StoredContext storedContext = threadContext.stashWithOrigin(origin)) { + try (ThreadContext.StoredContext storedContext = ThreadContextAccess.doPrivileged(() -> threadContext.stashWithOrigin(origin))) { assertEquals(origin, threadContext.getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); assertNull(threadContext.getTransient("foo")); assertNull(threadContext.getTransient("bar")); @@ -231,7 +231,7 @@ public void testStashAndMerge() { HashMap toMerge = new HashMap<>(); toMerge.put("foo", "baz"); toMerge.put("simon", "says"); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(toMerge)) { + try (ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged(() -> threadContext.stashAndMergeHeaders(toMerge))) { assertEquals("bar", threadContext.getHeader("foo")); assertEquals("says", threadContext.getHeader("simon")); assertNull(threadContext.getTransient("ctx.foo")); @@ -493,7 +493,13 @@ public void testStashAndMergeWithModifiedDefaults() { ThreadContext threadContext = new ThreadContext(build); HashMap toMerge = new HashMap<>(); toMerge.put("default", 
"2"); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(toMerge)) { + ThreadContext finalThreadContext1 = threadContext; + HashMap finalToMerge1 = toMerge; + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged( + () -> finalThreadContext1.stashAndMergeHeaders(finalToMerge1) + ) + ) { assertEquals("2", threadContext.getHeader("default")); } @@ -502,7 +508,13 @@ public void testStashAndMergeWithModifiedDefaults() { threadContext.putHeader("default", "4"); toMerge = new HashMap<>(); toMerge.put("default", "2"); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(toMerge)) { + ThreadContext finalThreadContext2 = threadContext; + HashMap finalToMerge2 = toMerge; + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged( + () -> finalThreadContext2.stashAndMergeHeaders(finalToMerge2) + ) + ) { assertEquals("4", threadContext.getHeader("default")); } } @@ -565,7 +577,7 @@ public void testPreservesThreadsOriginalContextOnRunException() throws IOExcepti threadContext.putHeader("foo", "bar"); boolean systemContext = randomBoolean(); if (systemContext) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); } threadContext.putTransient("foo", "bar_transient"); withContext = threadContext.preserveContext(new AbstractRunnable() { @@ -736,7 +748,7 @@ public void testMarkAsSystemContext() throws IOException { assertFalse(threadContext.isSystemContext()); try (ThreadContext.StoredContext context = threadContext.stashContext()) { assertFalse(threadContext.isSystemContext()); - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); assertTrue(threadContext.isSystemContext()); } assertFalse(threadContext.isSystemContext()); @@ -761,7 +773,7 @@ public void testSystemContextWithPropagator() { assertEquals(Integer.valueOf(1), threadContext.getTransient("test_transient_propagation_key")); assertEquals("bar", threadContext.getHeader("foo")); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); assertNull(threadContext.getHeader("foo")); assertNull(threadContext.getTransient("test_transient_propagation_key")); assertEquals("1", threadContext.getHeader("default")); @@ -793,7 +805,7 @@ public void testSerializeSystemContext() throws IOException { threadContext.writeTo(out); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { assertEquals("test", threadContext.getTransient("test_transient_propagation_key")); - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); threadContext.writeTo(outFromSystemContext); assertNull(threadContext.getHeader("foo")); assertNull(threadContext.getTransient("test_transient_propagation_key")); diff --git a/server/src/test/java/org/opensearch/core/RestStatusTests.java b/server/src/test/java/org/opensearch/core/RestStatusTests.java index f8dba99aa8b60..fbd238bd035d0 100644 --- a/server/src/test/java/org/opensearch/core/RestStatusTests.java +++ b/server/src/test/java/org/opensearch/core/RestStatusTests.java @@ -55,7 +55,11 @@ public void testStatusReturnsFailureStatusWhenFailuresExist() { heapOfFailures.add(failure); } - assertEquals(heapOfFailures.peek().status(), RestStatus.status(successfulShards, totalShards, failures)); + final RestStatus status = 
heapOfFailures.peek().status(); + // RestStatus.status returns RestStatus.OK when the highest failure status is in the 1xx (informational) range. + final RestStatus expected = status.getStatusFamilyCode() == 1 ? RestStatus.OK : status; + + assertEquals(expected, RestStatus.status(successfulShards, totalShards, failures)); } public void testSerialization() throws IOException { diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index bd56123f6df1f..1596a0b566b28 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -47,6 +47,11 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY; + public class GatewayAllocatorTests extends OpenSearchAllocationTestCase { private final Logger logger = LogManager.getLogger(GatewayAllocatorTests.class); @@ -368,6 +373,56 @@ public void testCreatePrimaryAndReplicaExecutorOfSizeTwo() { assertEquals(executor.getTimeoutAwareRunnables().size(), 2); } + public void testPrimaryAllocatorTimeout() { + // Valid setting with timeout = 20s + Settings build = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "20s").build(); + assertEquals(20, PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Valid setting with timeout > 20s + build = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "30000ms").build(); + assertEquals(30, PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Invalid setting with timeout < 20s + Settings lessThan20sSetting = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "10s").build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(lessThan20sSetting) + ); + assertEquals( + "Setting [" + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + "] should be more than 20s or -1ms to disable timeout", + iae.getMessage() + ); + + // Valid setting with timeout = -1 + build = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "-1").build(); + assertEquals(-1, PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getMillis()); + } + + public void testReplicaAllocatorTimeout() { + // Valid setting with timeout = 20s + Settings build = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "20s").build(); + assertEquals(20, REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Valid setting with timeout > 20s + build = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "30000ms").build(); + assertEquals(30, REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Invalid setting with timeout < 20s + Settings lessThan20sSetting = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "10s").build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(lessThan20sSetting) + ); + assertEquals( +
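These tests pin down the lower limit added for the primary and replica batch allocator timeouts: finite values below 20s are rejected, while -1 disables the timeout entirely. A minimal sketch of reading the setting (constants as statically imported above):

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;

import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING;
import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY;

// Accepted: any value >= 20s, or -1 to disable the timeout entirely.
Settings ok = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "30s").build();
TimeValue timeout = PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(ok); // 30 seconds

// Rejected: finite values below the 20s floor throw IllegalArgumentException.
Settings tooLow = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "10s").build();
// PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(tooLow); // -> IllegalArgumentException
```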
"Setting [" + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + "] should be more than 20s or -1ms to disable timeout", + iae.getMessage() + ); + + // Valid setting with timeout = -1 + build = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "-1").build(); + assertEquals(-1, REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getMillis()); + } + private void createIndexAndUpdateClusterState(int count, int numberOfShards, int numberOfReplicas) { if (count == 0) return; Metadata.Builder metadata = Metadata.builder(); diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java index f65acd0db0627..fe9db24f494ad 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java @@ -15,14 +15,30 @@ import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.LongField; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.AggregationScript; import org.opensearch.script.Script; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import java.io.IOException; import java.util.List; import static org.apache.lucene.index.IndexOptions.NONE; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class DerivedFieldTypeTests extends FieldTypeTestCase { @@ -100,4 +116,57 @@ public void testObjectType() { public void testUnsupportedType() { expectThrows(IllegalArgumentException.class, () -> createDerivedFieldType("match_only_text")); } + + public void testGetAggregationScript_keyword() throws IOException { + DerivedFieldType dft = spy(createDerivedFieldType("keyword")); + assertTrue(dft.isAggregatable()); + QueryShardContext mockContext = mock(QueryShardContext.class); + List expected = List.of("foo"); + mockValueFetcherForAggs(mockContext, dft, expected); + + AggregationScript.LeafFactory aggregationScript = dft.getAggregationScript(mockContext); + // have to use a memoryIndex because we can't mock leafReaderContext + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + AggregationScript script = aggregationScript.newInstance(leafReaderContext); + + Object result = script.execute(); + assertEquals(expected, result); + } + + public void testGetAggregationScript_ip() throws IOException { + DerivedFieldType dft = spy(createDerivedFieldType("ip")); + assertTrue(dft.isAggregatable()); + QueryShardContext mockContext = mock(QueryShardContext.class); + List expected = List.of("192.168.0.1"); + LeafSearchLookup leafSearchLookup = mockValueFetcherForAggs(mockContext, dft, expected); + SourceLookup sourceLookup = mock(SourceLookup.class); + 
when(leafSearchLookup.source()).thenReturn(sourceLookup); + AggregationScript.LeafFactory aggregationScript = dft.getAggregationScript(mockContext); + assertFalse(aggregationScript.needs_score()); + // have to use a memoryIndex because we can't mock leafReaderContext + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + AggregationScript script = aggregationScript.newInstance(leafReaderContext); + + // test setDocument + int docid = 1; + script.setDocument(docid); + verify(sourceLookup, times(1)).setSegmentAndDocument(any(), eq(docid)); + + // test execute + List result = (List) script.execute(); + assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString((String) expected.get(0)))), result.get(0)); + } + + private static LeafSearchLookup mockValueFetcherForAggs(QueryShardContext mockContext, DerivedFieldType dft, List expected) { + SearchLookup searchLookup = mock(SearchLookup.class); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(searchLookup.getLeafSearchLookup(any())).thenReturn(leafLookup); + when(mockContext.lookup()).thenReturn(searchLookup); + DerivedFieldValueFetcher valueFetcher = mock(DerivedFieldValueFetcher.class); + when(valueFetcher.fetchValuesInternal(any())).thenReturn(expected); + doReturn(valueFetcher).when(dft).valueFetcher(any(), any(), any()); + return leafLookup; + } } diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index 2fffebbcf5f1f..95113b7eeb370 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -96,7 +96,7 @@ public void testPreload() throws IOException { build = Settings.builder() .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "tip", "dim", "kdd", "kdi", "cfs", "doc") + .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "tip", "dim", "kdd", "kdi", "cfs", "doc", "new") .build(); try (Directory directory = newDirectory(build)) { assertTrue(FsDirectoryFactory.isHybridFs(directory)); @@ -108,7 +108,7 @@ public void testPreload() throws IOException { assertTrue(hybridDirectory.useDelegate("foo.tim")); assertTrue(hybridDirectory.useDelegate("foo.pos")); assertTrue(hybridDirectory.useDelegate("foo.pay")); - assertTrue(hybridDirectory.useDelegate("foo.new")); + assertFalse(hybridDirectory.useDelegate("foo.new")); assertFalse(hybridDirectory.useDelegate("foo.tip")); assertFalse(hybridDirectory.useDelegate("foo.dim")); assertFalse(hybridDirectory.useDelegate("foo.kdd")); @@ -123,63 +123,6 @@ public void testPreload() throws IOException { assertTrue(preLoadMMapDirectory.useDelegate("foo.cfs")); assertTrue(preLoadMMapDirectory.useDelegate("foo.nvd")); } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .build(); - try (Directory directory = newDirectory(build)) { - assertTrue(FsDirectoryFactory.isHybridFs(directory)); - 
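With the deprecated `index.store.hybrid.mmap.extensions` setting removed, `index.store.hybrid.nio.extensions` is the single switch for a hybrid store: listed extensions are read through NIO, everything else is memory-mapped, which is what the updated `useDelegate` assertions verify. A minimal configuration sketch (extension list illustrative, taken from the test):

```java
import java.util.Locale;

import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexModule;

// Hybrid store: listed extensions -> NIOFSDirectory, all others -> MMapDirectory.
Settings hybridStore = Settings.builder()
    .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT))
    .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "tip", "dim", "kdd", "kdi", "cfs", "doc")
    .build();
```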
FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - // test custom hybrid mmap extensions - // true->mmap, false->nio - assertTrue(hybridDirectory.useDelegate("foo.nvd")); - assertTrue(hybridDirectory.useDelegate("foo.dvd")); - assertTrue(hybridDirectory.useDelegate("foo.tim")); - assertTrue(hybridDirectory.useDelegate("foo.pos")); - assertTrue(hybridDirectory.useDelegate("foo.new")); - assertFalse(hybridDirectory.useDelegate("foo.pay")); - assertFalse(hybridDirectory.useDelegate("foo.tip")); - assertFalse(hybridDirectory.useDelegate("foo.dim")); - assertFalse(hybridDirectory.useDelegate("foo.kdd")); - assertFalse(hybridDirectory.useDelegate("foo.kdi")); - assertFalse(hybridDirectory.useDelegate("foo.cfs")); - assertFalse(hybridDirectory.useDelegate("foo.doc")); - MMapDirectory delegate = hybridDirectory.getDelegate(); - assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); - assertWarnings( - "[index.store.hybrid.mmap.extensions] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version." - ); - } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .build(); - try { - newDirectory(build); - } catch (final Exception e) { - assertEquals( - "Settings index.store.hybrid.nio.extensions & index.store.hybrid.mmap.extensions cannot both be set. Use index.store.hybrid.nio.extensions only.", - e.getMessage() - ); - } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .build(); - try { - newDirectory(build); - } catch (final Exception e) { - assertEquals( - "Settings index.store.hybrid.nio.extensions & index.store.hybrid.mmap.extensions cannot both be set. 
Use index.store.hybrid.nio.extensions only.", - e.getMessage() - ); - } build = Settings.builder() .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") @@ -198,24 +141,6 @@ public void testPreload() throws IOException { MMapDirectory delegate = hybridDirectory.getDelegate(); assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey()) - .build(); - try (Directory directory = newDirectory(build)) { - assertTrue(FsDirectoryFactory.isHybridFs(directory)); - FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - // test custom hybrid mmap extensions - // true->mmap, false->nio - assertTrue(hybridDirectory.useDelegate("foo.new")); - assertFalse(hybridDirectory.useDelegate("foo.nvd")); - assertFalse(hybridDirectory.useDelegate("foo.dvd")); - assertFalse(hybridDirectory.useDelegate("foo.cfs")); - assertFalse(hybridDirectory.useDelegate("foo.doc")); - MMapDirectory delegate = hybridDirectory.getDelegate(); - assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); - } } private Directory newDirectory(Settings settings) throws IOException { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java new file mode 100644 index 0000000000000..2fb65d7fe3c46 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.DerivedField; +import org.opensearch.index.mapper.DerivedFieldResolver; +import org.opensearch.index.mapper.DerivedFieldResolverFactory; +import org.opensearch.index.mapper.DerivedFieldType; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +public class DerivedFieldAggregationTests extends AggregatorTestCase { + + private QueryShardContext mockContext; + private List docs; + + private static final String[][] raw_requests = new String[][] { + { "40.135.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "40.135.0.0" }, + { "232.0.0.0 GET /images/hm_bg.jpg HTTP/1.0", "400", "232.0.0.0" }, + { "26.1.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "26.1.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "200", "247.37.0.0" } }; + + @Before + public void init() { + super.initValuesSourceRegistry(); + // Create a mock QueryShardContext + mockContext = mock(QueryShardContext.class); + when(mockContext.index()).thenReturn(new Index("test_index", "uuid")); + when(mockContext.allowExpensiveQueries()).thenReturn(true); + + MapperService mockMapperService = mock(MapperService.class); + when(mockContext.getMapperService()).thenReturn(mockMapperService); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + // Mock IndexSettings + IndexSettings mockIndexSettings = new IndexSettings( + IndexMetadata.builder("test_index").settings(indexSettings).build(), + Settings.EMPTY + ); + when(mockMapperService.getIndexSettings()).thenReturn(mockIndexSettings); + when(mockContext.getIndexSettings()).thenReturn(mockIndexSettings); + docs = new ArrayList<>(); + for (String[] request : raw_requests) { + Document document = new Document(); + document.add(new TextField("raw_request", request[0], Field.Store.YES)); + document.add(new KeywordField("status", request[1], 
Field.Store.YES)); + docs.add(document); + } + } + + public void testSimpleTermsAggregationWithDerivedField() throws IOException { + MappedFieldType keywordFieldType = new KeywordFieldMapper.KeywordFieldType("status"); + + SearchLookup searchLookup = mock(SearchLookup.class); + SourceLookup sourceLookup = new SourceLookup(); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(leafLookup.source()).thenReturn(sourceLookup); + + // Mock DerivedFieldScript.Factory + DerivedFieldScript.Factory factory = (params, lookup) -> (DerivedFieldScript.LeafFactory) ctx -> { + when(searchLookup.getLeafSearchLookup(any())).thenReturn(leafLookup); + return new DerivedFieldScript(params, lookup, ctx) { + @Override + public void execute() { + addEmittedValue(raw_requests[sourceLookup.docId()][1]); + } + + @Override + public void setDocument(int docid) { + sourceLookup.setSegmentAndDocument(ctx, docid); + } + }; + }; + + DerivedField derivedField = new DerivedField("derived_field", "keyword", new Script("")); + DerivedFieldResolver resolver = DerivedFieldResolverFactory.createResolver( + mockContext, + Collections.emptyMap(), + Collections.singletonList(derivedField), + true + ); + + // spy on the resolved type so we can mock the value fetcher + DerivedFieldType derivedFieldType = spy((DerivedFieldType) resolver.resolve("derived_field")); + DerivedFieldScript.LeafFactory leafFactory = factory.newFactory((new Script("")).getParams(), searchLookup); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory, null); + doReturn(valueFetcher).when(derivedFieldType).valueFetcher(any(), any(), any()); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("derived_terms").field("status").size(10); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + for (Document d : docs) { + iw.addDocument(d); + } + }, (InternalTerms result) -> { + assertEquals(2, result.getBuckets().size()); + List buckets = result.getBuckets(); + assertEquals("200", buckets.get(0).getKey()); + assertEquals(3, buckets.get(0).getDocCount()); + assertEquals("400", buckets.get(1).getKey()); + assertEquals(3, buckets.get(1).getDocCount()); + }, keywordFieldType, derivedFieldType); + } +}
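This aggregation test and the `ValuesSourceConfigTests` change that follows exercise the same contract: a derived field resolves to a script-backed `MappedFieldType` that aggregations can consume like any stored field. A condensed sketch of the resolution step, mirroring the calls above (the parameter comments are my reading of this usage, not documented semantics):

```java
import java.util.Collections;

import org.opensearch.index.mapper.DerivedField;
import org.opensearch.index.mapper.DerivedFieldResolver;
import org.opensearch.index.mapper.DerivedFieldResolverFactory;
import org.opensearch.index.mapper.MappedFieldType;
import org.opensearch.index.query.QueryShardContext;
import org.opensearch.script.Script;
import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

class DerivedFieldAggSketch {
    static MappedFieldType resolveDerivedKeyword(QueryShardContext context) {
        DerivedField derivedField = new DerivedField("derived_field", "keyword", new Script(""));
        DerivedFieldResolver resolver = DerivedFieldResolverFactory.createResolver(
            context,
            Collections.emptyMap(),                  // derived fields supplied as a name -> definition map (none here)
            Collections.singletonList(derivedField), // derived fields supplied directly as a list
            true                                     // derived fields enabled for this request
        );
        return resolver.resolve("derived_field");    // script-backed, aggregatable field type
    }

    // ...which a terms aggregation can then target like any other keyword field:
    static TermsAggregationBuilder termsOnDerivedField() {
        return new TermsAggregationBuilder("derived_terms").field("derived_field").size(10);
    }
}
```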
diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java index 33d9a63f61a35..568c3c950f588 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -37,9 +37,12 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.fielddata.SortedBinaryDocValues; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -334,4 +337,40 @@ public void testFieldAlias() throws Exception { assertEquals(new BytesRef("value"), values.nextValue()); } } + + public void testDerivedField() throws Exception { + String script = "derived_field_script"; + String derived_field = "derived_keyword"; + + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("derived") + .startObject(derived_field) + .field("type", "keyword") + .startObject("script") + .field("source", script) + .field("lang", "mockscript") + .endObject() + .endObject() + .endObject() + .endObject(); + IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); + ValuesSourceConfig config = ValuesSourceConfig.resolve( + context, + null, + derived_field, + null, + null, + null, + null, + CoreValuesSourceType.BYTES + ); + assertNotNull(config); + assertEquals(ValuesSource.Bytes.Script.class, config.getValuesSource().getClass()); + } + } } diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java index bf11bcaf39a96..98dfc367c20f5 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext.StoredContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.MetricsTelemetry; @@ -260,7 +261,7 @@ public void testSpanNotPropagatedToChildSystemThreadContext() { try (StoredContext ignored = threadContext.stashContext()) { assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); } } diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java new file mode 100644 index 0000000000000..d292809c30124 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.wlm; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; + +import static org.opensearch.wlm.QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER; +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; + +public class QueryGroupTaskTests extends OpenSearchTestCase { + private ThreadPool threadPool; + private QueryGroupTask sut; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + sut = new QueryGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testSuccessfulSetQueryGroupId() { + sut.setQueryGroupId(threadPool.getThreadContext()); + assertEquals(DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(), sut.getQueryGroupId()); + + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, "akfanglkaglknag2332"); + + sut.setQueryGroupId(threadPool.getThreadContext()); + assertEquals("akfanglkaglknag2332", sut.getQueryGroupId()); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java new file mode 100644 index 0000000000000..db4e5e45d49ed --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.wlm.WorkloadManagementTransportInterceptor.RequestHandler; + +import static org.opensearch.threadpool.ThreadPool.Names.SAME; + +public class WorkloadManagementTransportInterceptorTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private WorkloadManagementTransportInterceptor sut; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + sut = new WorkloadManagementTransportInterceptor(threadPool); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testInterceptHandler() { + TransportRequestHandler requestHandler = sut.interceptHandler("Search", SAME, false, null); + assertTrue(requestHandler instanceof RequestHandler); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java new file mode 100644 index 0000000000000..789c02345e774 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm; + +import org.opensearch.action.index.IndexRequest; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +import java.util.Collections; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; + +public class WorkloadManagementTransportRequestHandlerTests extends OpenSearchTestCase { + private WorkloadManagementTransportInterceptor.RequestHandler sut; + private ThreadPool threadPool; + + private TestTransportRequestHandler actualHandler; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + actualHandler = new TestTransportRequestHandler<>(); + + sut = new WorkloadManagementTransportInterceptor.RequestHandler<>(threadPool, actualHandler); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testMessageReceivedForSearchWorkload() throws Exception { + ShardSearchRequest request = mock(ShardSearchRequest.class); + QueryGroupTask spyTask = getSpyTask(); + + sut.messageReceived(request, mock(TransportChannel.class), spyTask); + assertTrue(sut.isSearchWorkloadRequest(spyTask)); + } + + public void testMessageReceivedForNonSearchWorkload() throws Exception { + IndexRequest indexRequest = mock(IndexRequest.class); + Task task = mock(Task.class); + sut.messageReceived(indexRequest, mock(TransportChannel.class), task); + assertFalse(sut.isSearchWorkloadRequest(task)); + assertEquals(1, actualHandler.invokeCount); + } + + private static QueryGroupTask getSpyTask() { + final QueryGroupTask task = new QueryGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); + + return spy(task); + } + + private static class TestTransportRequestHandler implements TransportRequestHandler { + int invokeCount = 0; + + @Override + public void messageReceived(TransportRequest request, TransportChannel channel, Task task) throws Exception { + invokeCount += 1; + } + }; +} diff --git a/server/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/test/resources/org/opensearch/bootstrap/test.policy index 7b0a9b3d5d709..c2b5a8e9c0a4e 100644 --- a/server/src/test/resources/org/opensearch/bootstrap/test.policy +++ b/server/src/test/resources/org/opensearch/bootstrap/test.policy @@ -7,7 +7,7 @@ */ grant { - // allow to test Security policy and codebases + // allow to test Security policy and codebases permission java.util.PropertyPermission "*", "read,write"; permission java.security.SecurityPermission "createPolicy.JavaPolicy"; }; diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index f6113860e3907..34b8c58a9c5b2 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; import 
org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -287,6 +288,19 @@ public static ClusterState startShardsAndReroute( return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting"); } + protected RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) { + RoutingAllocation allocation = new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + allocation.debugDecision(true); + return allocation; + } + public static class TestAllocateDecision extends AllocationDecider { private final Decision decision; @@ -465,5 +479,6 @@ public void allocateUnassigned( unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes()); } } + } } diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java index f67108345550f..c7c71f0f569e5 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java @@ -205,6 +205,32 @@ public static ShardRouting newShardRouting( ); } + public static ShardRouting newShardRoutingRemoteRestore( + String index, + ShardId shardId, + String currentNodeId, + String relocatingNodeId, + boolean primary, + ShardRoutingState state, + UnassignedInfo unassignedInfo + ) { + return new ShardRouting( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + Version.V_EMPTY, + new IndexId(shardId.getIndexName(), shardId.getIndexName()) + ), + unassignedInfo, + buildAllocationId(state), + -1 + ); + } + public static ShardRouting newShardRouting( ShardId shardId, String currentNodeId, diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 53ef595c7931e..64f3dbc4fd967 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -44,6 +44,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.node.Node; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; @@ -134,7 +135,7 @@ public void run() { scheduledNextTask = false; final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); task.run(); } if (waitForPublish == false) {
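For completeness, the new `newShardRoutingRemoteRestore` test-framework helper above is what `DecideAllocateUnassignedTests` uses to fabricate an unassigned primary carrying a `RemoteStoreRecoverySource`; a minimal usage sketch, mirroring the "idx2" setup in those tests:

```java
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.TestShardRouting;
import org.opensearch.cluster.routing.UnassignedInfo;
import org.opensearch.core.index.shard.ShardId;

// An unassigned primary restored from remote store, ready to be handed to an allocator.
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, null);
ShardRouting restoredPrimary = TestShardRouting.newShardRoutingRemoteRestore(
    "idx2",
    new ShardId("idx2", "_na_", 0),
    null,                           // currentNodeId: not yet allocated
    null,                           // relocatingNodeId: not relocating
    true,                           // primary
    ShardRoutingState.UNASSIGNED,
    unassignedInfo
);
```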