diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e13ad9b3ed4a..2011769858699 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,8 +98,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) - Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) -- Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Add SLF4J license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [Streaming Indexing] Introduce a new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) ### Dependencies - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) @@ -125,6 +126,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add instrumentation for indexing in transport bulk action and transport shard bulk action.
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) - [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) ### Deprecated @@ -138,6 +140,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) - Fix SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) +- Add version condition while adding geoshape doc values to the index to ensure backward compatibility ([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095)) ### Security diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 0d98cba35448f..f19437979c852 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -29,6 +29,10 @@ jna = 5.13.0 netty = 4.1.100.Final joda = 2.12.2 +# project reactor +reactor_netty = 1.1.12 +reactor = 3.5.11 + # client dependencies httpclient5 = 5.2.1 httpcore5 = 5.2.2 diff --git a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java index cd75bddd680e5..c7b9bee3cbf4d 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java +++ b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java @@ -30,6 +30,13 @@ * GitHub history for details. */ +/* + * This code is based on code from SLF4J 1.5.11 + * Copyright (c) 2004-2007 QOS.ch + * All rights reserved.
+ * SPDX-License-Identifier: MIT + */ + package org.opensearch.core.common.logging; import java.util.HashSet; diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java index 86d8ad2968e7f..7316847ac6046 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java @@ -87,7 +87,7 @@ protected boolean forbidPrivateIndexSettings() { */ protected void prepareGeoShapeIndexForAggregations(final Random random) throws Exception { expectedDocsCountForGeoShapes = new HashMap<>(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); final List geoshapes = new ArrayList<>(); assertAcked(prepareCreate(GEO_SHAPE_INDEX_NAME).setSettings(settings).setMapping(GEO_SHAPE_FIELD_NAME, "type" + "=geo_shape")); boolean isShapeIntersectingBB = false; @@ -136,7 +136,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep expectedDocCountsForSingleGeoPoint = new HashMap<>(); createIndex("idx_unmapped"); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put("index.number_of_shards", 4) .put("index.number_of_replicas", 0) .build(); @@ -160,7 +160,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep protected void prepareMultiValuedGeoPointIndex(final Random random) throws Exception { multiValuedExpectedDocCountsGeoPoint = new HashMap<>(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); final List cities = new ArrayList<>(); assertAcked( prepareCreate("multi_valued_idx").setSettings(settings) diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 1677f333a4b1c..4970c42163ac3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -116,6 +116,9 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +/** + * The HTTP transport implementation based on Netty 4.
+ */ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class); @@ -184,6 +187,17 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private volatile ServerBootstrap serverBootstrap; private volatile SharedGroupFactory.SharedGroup sharedGroup; + /** + * Creates a new HTTP transport implementation based on Netty 4 + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + */ public Netty4HttpServerTransport( Settings settings, NetworkService networkService, diff --git a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt b/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.
\ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 26e2b4813b8a5..51f2057b4bedb 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,11 +56,8 @@ dependencies { api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.23.0' - api 'org.reactivestreams:reactive-streams:1.0.4' - api 'io.projectreactor:reactor-core:3.5.6' - api 'io.projectreactor.netty:reactor-netty:1.1.8' - api 'io.projectreactor.netty:reactor-netty-core:1.1.8' - api 'io.projectreactor.netty:reactor-netty-http:1.1.9' + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" @@ -101,10 +98,6 @@ thirdPartyAudit { 'com.azure.storage.internal.avro.implementation.schema.AvroSchema', 'com.ctc.wstx.shaded.msv_core.driver.textui.Driver', 'io.micrometer.context.ContextAccessor', - 'io.micrometer.context.ContextRegistry', - 'io.micrometer.context.ContextSnapshot', - 'io.micrometer.context.ContextSnapshot$Scope', - 'io.micrometer.core.instrument.Clock', 'io.micrometer.core.instrument.Counter', 'io.micrometer.core.instrument.Counter$Builder', 'io.micrometer.core.instrument.DistributionSummary', @@ -114,14 +107,10 @@ thirdPartyAudit { 'io.micrometer.core.instrument.Meter', 'io.micrometer.core.instrument.MeterRegistry', 'io.micrometer.core.instrument.Metrics', - 'io.micrometer.core.instrument.Tag', - 'io.micrometer.core.instrument.Tags', 'io.micrometer.core.instrument.Timer', 'io.micrometer.core.instrument.Timer$Builder', 'io.micrometer.core.instrument.Timer$Sample', - 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', - 'io.micrometer.core.instrument.search.Search', 'io.netty.channel.epoll.Epoll', 'io.netty.channel.epoll.EpollDatagramChannel', 'io.netty.channel.epoll.EpollServerSocketChannel', @@ -168,9 +157,6 @@ thirdPartyAudit { 'org.slf4j.impl.StaticLoggerBinder', 'org.slf4j.impl.StaticMDCBinder', 'org.slf4j.impl.StaticMarkerBinder', - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration', - 'io.micrometer.context.ThreadLocalAccessor', 'io.micrometer.common.KeyValue', 'io.micrometer.common.KeyValues', 'io.micrometer.common.docs.KeyName', @@ -190,6 +176,7 @@ thirdPartyAudit { 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', 'io.micrometer.tracing.propagation.Propagator', 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.core.instrument.Tags', 'io.micrometer.observation.ObservationHandler', 'io.micrometer.observation.ObservationRegistry', 'io.micrometer.observation.ObservationRegistry$ObservationConfig', @@ -210,8 +197,7 @@ thirdPartyAudit { 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 
'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' ) } diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt b/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 deleted file mode 100644 index ad9b7263e7b38..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -027fdc551537b349389176a23a192f11a7a3d7de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 deleted file mode 100644 index 6b6bf1903b16c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d53a9d7d0395285f4c81664494fcd61477626e32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..352d69396d0c9 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 @@ -0,0 +1 @@ +378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 deleted file mode 100644 index 707631f4dfe0c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48999c4ae27cdcee5eaff9dfd150a8b64624f0f5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..1bcb0e0c52950 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 @@ -0,0 +1 @@ +e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 deleted file mode 100644 index 96deead2c75d1..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -408b3037133f2e8ab0f195ccd3f807026be9b860 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 44fd45b265e82..560d12d14395d 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -70,7 +70,6 @@ dependencies { api "software.amazon.awssdk:sts:${versions.aws}" api "software.amazon.awssdk:netty-nio-client:${versions.aws}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt b/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt +++ /dev/null 
@@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt b/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle new file mode 100644 index 0000000000000..7d7eb330b4a55 --- /dev/null +++ b/plugins/transport-reactor-netty4/build.gradle @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +import org.opensearch.gradle.info.BuildParams +import org.opensearch.gradle.test.RestIntegTestTask +import org.opensearch.gradle.test.TestTask +import org.opensearch.gradle.test.rest.JavaRestTestPlugin +import org.opensearch.gradle.test.InternalClusterTestPlugin + +apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.java-rest-test' +apply plugin: 'opensearch.internal-cluster-test' + +// The transport-reactor-netty4 plugin is published to Maven +apply plugin: 'opensearch.publish' + +opensearchplugin { + description 'Reactor Netty 4 based transport implementation' + classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin' + hasClientJar = true +} + +dependencies { + // network stack + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-codec:${versions.netty}" + api "io.netty:netty-codec-dns:${versions.netty}" + api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "io.netty:netty-handler:${versions.netty}" + api "io.netty:netty-resolver-dns:${versions.netty}" + api "io.netty:netty-resolver:${versions.netty}" + api "io.netty:netty-transport:${versions.netty}" + api "io.netty:netty-transport-native-unix-common:${versions.netty}" + + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" + testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + testImplementation project(":modules:transport-netty4") +} + +restResources { + restApi { + includeCore '_common', 'cluster', 'nodes' + } +} + +tasks.named("dependencyLicenses").configure { + mapping from: /netty-.*/, to: 'netty' + mapping from: /reactor-.*/, to: 'reactor' +} + +// TODO: Remove this once we have a complete test suite +testingConventions.enabled = false + +test { + /* + * We have to disable setting the number of available processors, because tests in the same JVM randomize the processor count and + * would step on each other if allowed to set it, since the value is set once in Netty.
+ */ + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +internalClusterTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +javaRestTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.CertificateCompressionAlgo', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + 'io.netty.internal.tcnative.SSLPrivateKeyMethod', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 
'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', + 'io.netty.internal.tcnative.CertificateCallback', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLSession', + 'io.netty.internal.tcnative.SSLSessionCache', + 'io.netty.channel.epoll.Epoll', + 'io.netty.channel.epoll.EpollDatagramChannel', + 'io.netty.channel.epoll.EpollServerSocketChannel', + 'io.netty.channel.epoll.EpollSocketChannel', + 'io.netty.channel.kqueue.KQueue', + 'io.netty.channel.kqueue.KQueueDatagramChannel', + 'io.netty.channel.kqueue.KQueueServerSocketChannel', + 'io.netty.channel.kqueue.KQueueSocketChannel', + 'io.netty.handler.codec.haproxy.HAProxyMessage', + 'io.netty.handler.codec.haproxy.HAProxyMessageDecoder', + 'io.netty.handler.proxy.ProxyHandler', + 'io.netty.incubator.channel.uring.IOUring', + 'io.netty.incubator.channel.uring.IOUringDatagramChannel', + 'io.netty.incubator.channel.uring.IOUringServerSocketChannel', + 'io.netty.incubator.channel.uring.IOUringSocketChannel', + + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'io.micrometer.common.KeyValue', + 'io.micrometer.common.KeyValues', + 'io.micrometer.common.docs.KeyName', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Gauge', + 'io.micrometer.core.instrument.Gauge$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.Meter$Type', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.docs.MeterDocumentation', + 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.observation.Observation', + 'io.micrometer.observation.Observation$Context', + 'io.micrometer.observation.ObservationHandler', + 'io.micrometer.observation.ObservationRegistry', + 'io.micrometer.observation.ObservationRegistry$ObservationConfig', + 'io.micrometer.observation.docs.ObservationDocumentation', + 'io.micrometer.observation.transport.ReceiverContext', + 'io.micrometer.observation.transport.RequestReplyReceiverContext', + 'io.micrometer.observation.transport.RequestReplySenderContext', + 'io.micrometer.observation.transport.SenderContext', + 'io.micrometer.tracing.Span', + 'io.micrometer.tracing.Tracer', + 'io.micrometer.tracing.docs.SpanDocumentation', + 'io.micrometer.tracing.handler.DefaultTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', + 
'io.micrometer.tracing.propagation.Propagator' + ) + + ignoreViolations( + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5' + ) +} diff --git a/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt new file mode 100644 index 0000000000000..5bbf91a14de23 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt @@ -0,0 +1,116 @@ + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+ +Also, please refer to each LICENSE.<component>.txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality.
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..dfa4a0fbea94c --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 
100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..0948daa05fff6 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt rename to plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..352d69396d0c9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 @@ -0,0 +1 @@ +378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 new file mode 100644 index 0000000000000..1bcb0e0c52950 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 @@ -0,0 +1 @@ +e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java new file mode 100644 index 0000000000000..abbd50bf1b235 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch; + +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.Netty4ModulePlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; +import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport; + +import java.util.Collection; +import java.util.List; + +public abstract class OpenSearchReactorNetty4IntegTestCase extends OpenSearchIntegTestCase { + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected boolean addMockTransportService() { + return false; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + // randomize netty settings + if (randomBoolean()) { + builder.put(ReactorNetty4Transport.SETTING_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + } + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME); + return builder.build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(ReactorNetty4Plugin.class, Netty4ModulePlugin.class); + } +} diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java new file mode 100644 index 0000000000000..833d60375a2bd --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchReactorNetty4IntegTestCase; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; + +/** + * This test checks that in-flight requests are limited at the HTTP level and that requests that are excluded from limiting can pass. + * + * As the same setting is also used to limit in-flight requests at the transport level, we avoid transport messages by forcing + * a single node "cluster". + */ +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1) +public class ReactorNetty4HttpRequestSizeLimitIT extends OpenSearchReactorNetty4IntegTestCase { + + private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB); + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), LIMIT) + .build(); + } + + public void testLimitsInFlightRequests() throws Exception { + ensureGreen(); + + // we use the limit size as a (very) rough indication on how many requests we should send to hit the limit + int numRequests = LIMIT.bytesAsInt() / 100; + + StringBuilder bulkRequest = new StringBuilder(); + for (int i = 0; i < numRequests; i++) { + bulkRequest.append("{\"index\": {}}"); + bulkRequest.append(System.lineSeparator()); + bulkRequest.append("{ \"field\" : \"value\" }"); + bulkRequest.append(System.lineSeparator()); + } + + List<Tuple<String, CharSequence>> requests = new ArrayList<>(); + for (int i = 0; i < 150; i++) { + requests.add(Tuple.tuple("/index/_bulk", bulkRequest)); + } + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) { + final Collection<FullHttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1)); + try { + assertThat(singleResponse, hasSize(1)); + assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK); + + final Collection<FullHttpResponse> multipleResponses = nettyHttpClient.post(transportAddress.address(), requests); + try { + assertThat(multipleResponses, hasSize(requests.size())); + assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.TOO_MANY_REQUESTS); + } finally { + multipleResponses.forEach(ReferenceCounted::release); + } + } finally { + singleResponse.forEach(ReferenceCounted::release); + } + } + } + + public void
testDoesNotLimitExcludedRequests() throws Exception { + ensureGreen(); + + List<Tuple<String, String>> requestUris = new ArrayList<>(); + for (int i = 0; i < 1500; i++) { + requestUris.add(Tuple.tuple("/_cluster/settings", "{ \"transient\": {\"search.default_search_timeout\": \"40s\" } }")); + } + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) { + final Collection<FullHttpResponse> responses = nettyHttpClient.put(transportAddress.address(), requestUris); + try { + assertThat(responses, hasSize(requestUris.size())); + assertAllInExpectedStatus(responses, HttpResponseStatus.OK); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertAtLeastOnceExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) { + long countExpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus)).count(); + assertThat("Expected at least one request with status [" + expectedStatus + "]", countExpectedStatus, greaterThan(0L)); + } + + private void assertAllInExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) { + long countUnexpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus) == false).count(); + assertThat( + "Expected all requests with status [" + expectedStatus + "] but [" + countUnexpectedStatus + "] requests had a different one", + countUnexpectedStatus, + equalTo(0L) + ); + } + +} diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java new file mode 100644 index 0000000000000..c0e43de06f6ff --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchReactorNetty4IntegTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.Collection; +import java.util.Locale; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class ReactorNetty4PipeliningIT extends OpenSearchReactorNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + public void testThatNettyHttpServerSupportsPipelining() throws Exception { + String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + try (ReactorHttpClient client = ReactorHttpClient.create()) { + Collection<FullHttpResponse> responses = client.get(transportAddress.address(), true, requests); + try { + assertThat(responses, hasSize(5)); + + Collection<String> opaqueIds = ReactorHttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInOrder(opaqueIds); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) { + // check if opaque ids are monotonically increasing + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); + for (String opaqueId : opaqueIds) { + assertThat(msg, opaqueId, is(String.valueOf(i++))); + } + } + +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java new file mode 100644 index 0000000000000..bd75227dabd08 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.rest.RestRequest; + +import io.netty.handler.codec.http.HttpMethod; + +final class HttpConversionUtil { + private HttpConversionUtil() {} + + /** + * Converts {@link HttpMethod} to {@link RestRequest.Method} + * @param method {@link HttpMethod} method + * @return corresponding {@link RestRequest.Method} + * @throws IllegalArgumentException if HTTP method is not supported + */ + public static RestRequest.Method convertMethod(HttpMethod method) { + if (method == HttpMethod.GET) { + return RestRequest.Method.GET; + } else if (method == HttpMethod.POST) { + return RestRequest.Method.POST; + } else if (method == HttpMethod.PUT) { + return RestRequest.Method.PUT; + } else if (method == HttpMethod.DELETE) { + return RestRequest.Method.DELETE; + } else if (method == HttpMethod.HEAD) { + return RestRequest.Method.HEAD; + } else if (method == HttpMethod.OPTIONS) { + return RestRequest.Method.OPTIONS; + } else if (method == HttpMethod.PATCH) { + return RestRequest.Method.PATCH; + } else if (method == HttpMethod.TRACE) { + return RestRequest.Method.TRACE; + } else if (method == HttpMethod.CONNECT) { + return RestRequest.Method.CONNECT; + } else { + throw new IllegalArgumentException("Unexpected http method: " + method); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java new file mode 100644 index 0000000000000..98b359319ff1b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class NonStreamingHttpChannel implements HttpChannel { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + private final FluxSink<HttpContent> emitter; + + NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink<HttpContent> emitter) { + this.request = request; + this.response = response; + this.emitter = emitter; + this.request.withConnection(connection -> Netty4Utils.addListener(connection.channel().closeFuture(), closeContext)); + } + + @Override + public boolean isOpen() { + final AtomicBoolean isOpen = new AtomicBoolean(); + request.withConnection(connection -> isOpen.set(connection.channel().isOpen())); + return isOpen.get(); + } + + @Override + public void close() { + request.withConnection(connection -> connection.channel().close()); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener<Void> listener) { + emitter.next(createResponse(response)); + listener.onResponse(null); + emitter.complete(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) response.remoteAddress(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) response.hostAddress(); + } + + FullHttpResponse createResponse(HttpResponse response) { + return (FullHttpResponse) response; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java new file mode 100644 index 0000000000000..d43e23e800e65 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.http.AbstractHttpServerTransport; +import org.opensearch.http.HttpRequest; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import io.netty.buffer.CompositeByteBuf; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.LastHttpContent; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.Disposable; +import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class NonStreamingRequestConsumer<T extends HttpContent> implements Consumer<T>, Publisher<HttpContent>, Disposable { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompositeByteBuf content; + private final Publisher<HttpContent> publisher; + private final AbstractHttpServerTransport transport; + private final AtomicBoolean disposed = new AtomicBoolean(false); + private volatile FluxSink<HttpContent> emitter; + + NonStreamingRequestConsumer( + AbstractHttpServerTransport transport, + HttpServerRequest request, + HttpServerResponse response, + int maxCompositeBufferComponents + ) { + this.transport = transport; + this.request = request; + this.response = response; + this.content = response.alloc().compositeBuffer(maxCompositeBufferComponents); + this.publisher = Flux.create(emitter -> register(emitter)); + } + + private void register(FluxSink<HttpContent> emitter) { + this.emitter = emitter.onDispose(this).onCancel(this); + } + + @Override + public void accept(T message) { + try { + if (message instanceof LastHttpContent) { + process(message, emitter); + } else if (message instanceof HttpContent) { + process(message, emitter); + } + } catch (Throwable ex) { + emitter.error(ex); + } + } + + public void process(HttpContent in, FluxSink<HttpContent> emitter) { + // Consume request body in full before dispatching it + content.addComponent(true, in.content().retain()); + + if (in instanceof LastHttpContent) { + final NonStreamingHttpChannel channel = new NonStreamingHttpChannel(request, response, emitter); + final HttpRequest r = createRequest(request, content); + + try { + transport.incomingRequest(r, channel); + } catch (Exception ex) { + emitter.error(ex); + transport.onException(channel, ex); + } finally { + r.release(); + if (disposed.compareAndSet(false, true)) { + this.content.release(); + } + } + } + } + + HttpRequest createRequest(HttpServerRequest request, CompositeByteBuf content) { + return new ReactorNetty4HttpRequest(request, content.retain()); + } + + @Override + public void subscribe(Subscriber<? super HttpContent> s) { + publisher.subscribe(s); + } + + @Override + public void dispose() { + if (disposed.compareAndSet(false, true)) { + this.content.release(); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java new file mode 100644 index 0000000000000..4406c555a5b04 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java @@ -0,0 +1,272 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.util.AbstractMap; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; +import reactor.netty.http.server.HttpServerRequest; + +class ReactorNetty4HttpRequest implements HttpRequest { + private final String protocol; + private final HttpMethod method; + private final String uri; + private final ByteBuf content; + private final HttpHeadersMap headers; + private final AtomicBoolean released; + private final Exception inboundException; + private final boolean pooled; + + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content) { + this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), true, content); + } + + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content, Exception inboundException) { + this( + request.protocol(), + request.method(), + request.uri(), + new HttpHeadersMap(request.requestHeaders()), + new AtomicBoolean(false), + true, + content, + inboundException + ); + } + + private ReactorNetty4HttpRequest( + HttpServerRequest request, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + ByteBuf content + ) { + this(request.protocol(), request.method(), request.uri(), headers, released, pooled, content, null); + } + + private ReactorNetty4HttpRequest( + String protocol, + HttpMethod method, + String uri, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + ByteBuf content, + Exception inboundException + ) { + + this.protocol = protocol; + this.method = method; + this.uri = uri; + this.headers = headers; + this.content = content; + this.pooled = pooled; + this.released = released; + this.inboundException = inboundException; + } + + @Override + public RestRequest.Method method() { + return HttpConversionUtil.convertMethod(method); + } + + @Override + public String uri() { + return uri; + } + + @Override + public BytesReference content() { + assert released.get() == false; + return Netty4Utils.toBytesReference(content); + } + + @Override + public void release() { + if (pooled && released.compareAndSet(false, true)) { + content.release(); + } + } + + @Override + public HttpRequest releaseAndCopy() { + assert released.get() == false; + if (pooled == false) { + return this; + } + try { + final ByteBuf copiedContent = Unpooled.copiedBuffer(content); + return new ReactorNetty4HttpRequest(protocol, method, uri, headers, new AtomicBoolean(false), false, copiedContent, null); + } finally { + release(); + } + } + + @Override + public final Map<String, List<String>> getHeaders() { + return headers; + } + + @Override + public List<String> strictCookies() { + String cookieString =
headers.httpHeaders.get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + return ServerCookieEncoder.STRICT.encode(cookies); + } + } + return Collections.emptyList(); + } + + @Override + public HttpVersion protocolVersion() { + if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_0.toString())) { + return HttpRequest.HttpVersion.HTTP_1_0; + } else if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_1.toString())) { + return HttpRequest.HttpVersion.HTTP_1_1; + } else { + throw new IllegalArgumentException("Unexpected http protocol version: " + protocol); + } + } + + @Override + public HttpRequest removeHeader(String header) { + HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(headers.httpHeaders); + headersWithoutContentTypeHeader.remove(header); + + return new ReactorNetty4HttpRequest( + protocol, + method, + uri, + new HttpHeadersMap(headersWithoutContentTypeHeader), + released, + pooled, + content, + null + ); + } + + @Override + public ReactorNetty4HttpResponse createResponse(RestStatus status, BytesReference content) { + return new ReactorNetty4HttpResponse( + headers.httpHeaders, + io.netty.handler.codec.http.HttpVersion.valueOf(protocol), + status, + content + ); + } + + @Override + public Exception getInboundException() { + return inboundException; + } + + /** + * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications + * and due to the underlying implementation, it performs case-insensitive lookups of key to values. + * + * It is important to note that this implementation does have some downsides in that each invocation of the + * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a + * view of the underlying values. + */ + private static class HttpHeadersMap implements Map<String, List<String>> { + + private final HttpHeaders httpHeaders; + + private HttpHeadersMap(HttpHeaders httpHeaders) { + this.httpHeaders = httpHeaders; + } + + @Override + public int size() { + return httpHeaders.size(); + } + + @Override + public boolean isEmpty() { + return httpHeaders.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return key instanceof String && httpHeaders.contains((String) key); + } + + @Override + public boolean containsValue(Object value) { + return value instanceof List && httpHeaders.names().stream().map(httpHeaders::getAll).anyMatch(value::equals); + } + + @Override + public List<String> get(Object key) { + return key instanceof String ? httpHeaders.getAll((String) key) : null; + } + + @Override + public List<String> put(String key, List<String> value) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public List<String> remove(Object key) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void putAll(Map<? extends String, ? extends List<String>> m) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void clear() { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public Set<String> keySet() { + return httpHeaders.names(); + } + + @Override + public Collection<List<String>> values() { + return httpHeaders.names().stream().map(k -> Collections.unmodifiableList(httpHeaders.getAll(k))).collect(Collectors.toList()); + } + + @Override + public Set<Entry<String, List<String>>> entrySet() { + return httpHeaders.names() + .stream() + .map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) + .collect(Collectors.toSet()); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java new file mode 100644 index 0000000000000..c45ad54b668a3 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; + +class ReactorNetty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse { + private final HttpHeaders requestHeaders; + + ReactorNetty4HttpResponse(HttpHeaders requestHeaders, HttpVersion version, RestStatus status, BytesReference content) { + super(version, HttpResponseStatus.valueOf(status.getStatus()), Netty4Utils.toByteBuf(content)); + this.requestHeaders = requestHeaders; + } + + @Override + public void addHeader(String name, String value) { + headers().add(name, value); + } + + @Override + public boolean containsHeader(String name) { + return headers().contains(name); + } + + public HttpHeaders requestHeaders() { + return requestHeaders; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java new file mode 100644 index 0000000000000..84360bf028ba9 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpServerChannel; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; + +import io.netty.channel.Channel; + +class ReactorNetty4HttpServerChannel implements HttpServerChannel { + private final Channel channel; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + + ReactorNetty4HttpServerChannel(Channel channel) { + this.channel = channel; + Netty4Utils.addListener(this.channel.closeFuture(), closeContext); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) channel.localAddress(); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() { + channel.close(); + } + + @Override + public String toString() { + return "ReactorNetty4HttpServerChannel{localAddress=" + getLocalAddress() + "}"; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java new file mode 100644 index 0000000000000..d4a5a9ad83af6 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -0,0 +1,313 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.net.NetUtils; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.AbstractHttpServerTransport; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpReadTimeoutException; +import org.opensearch.http.HttpServerChannel; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.net.SocketOption; +import java.time.Duration; + +import io.netty.channel.ChannelOption; +import io.netty.channel.socket.nio.NioChannelOption; +import io.netty.handler.codec.http.DefaultLastHttpContent; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.timeout.ReadTimeoutException; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; +import reactor.netty.DisposableServer; +import reactor.netty.http.HttpProtocol; +import reactor.netty.http.server.HttpServer; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_COUNT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_IDLE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_INTERVAL; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; + +/** + * The HTTP transport implementations based on Reactor Netty (see please {@link HttpServer}). 
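+ * <p>
+ * For illustration only, a minimal sketch of selecting this transport on a node, assuming the plugin is
+ * installed; the key comes from {@code NetworkModule.HTTP_TYPE_KEY} and the value from
+ * {@code ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME}, as the integration tests in this plugin do:
+ * <pre>{@code
+ * Settings settings = Settings.builder()
+ *     .put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME) // "reactor-netty4"
+ *     .build();
+ * }</pre>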
+ */ +public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTransport { + private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; + private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("opensearch.net.mtu", "1500"))); + + /** + * The number of Reactor Netty HTTP workers + */ + public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = Setting.intSetting("http.netty.worker_count", 0, Property.NodeScope); + + /** + * The maximum number of composite components for request accumulation + */ + public static final Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>( + SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + (s) -> { + ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s); + /* + * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of + * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate + * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded() + * for implementation details). We do not want to resize that buffer because this leads to additional garbage on the heap and also + * increases the application's native memory footprint (as direct byte buffers hold their contents off-heap). + * + * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see + * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for + * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The + * number of buffers that are needed depends on how often Netty reads network packets, which depends on the network type (MTU). + * We assume here that OpenSearch receives HTTP requests via an Ethernet connection which has an MTU of 1500 bytes. + * + * Note that we are *not* pre-allocating any memory based on this setting but rather determine the CompositeByteBuf's capacity. + * The tradeoff is between fewer (but larger) buffers that are contained in the CompositeByteBuf and more (but smaller) buffers. + * With the default max content length of 100MB and an MTU of 1500 bytes we would allow 69905 entries. + */ + long maxBufferComponentsEstimate = Math.round((double) maxContentLength.getBytes() / MTU.getBytes()); + // clamp value to the allowed range; Netty's CompositeByteBuf implementation does not allow less than two components + long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE)); + return String.valueOf(maxBufferComponents);
+ }, + s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), + Property.NodeScope + ); + + private final SharedGroupFactory sharedGroupFactory; + private final int readTimeoutMillis; + private final int connectTimeoutMillis; + private final int maxCompositeBufferComponents; + private final ByteSizeValue maxInitialLineLength; + private final ByteSizeValue maxHeaderSize; + private final ByteSizeValue maxChunkSize; + private volatile SharedGroupFactory.SharedGroup sharedGroup; + private volatile DisposableServer disposableServer; + private volatile Scheduler scheduler; + + /** + * Creates new HTTP transport implementations based on Reactor Netty (see please {@link HttpServer}). + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + * @param tracer tracer instance + */ + public ReactorNetty4HttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer + ) { + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); + Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); + this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis()); + this.connectTimeoutMillis = Math.toIntExact(SETTING_HTTP_CONNECT_TIMEOUT.get(settings).getMillis()); + this.sharedGroupFactory = sharedGroupFactory; + this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); + this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); + this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + } + + /** + * Binds the transport engine to the socket address + * @param socketAddress socket address to bind to + */ + @Override + protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Exception { + final HttpServer server = configureChannelOptions( + HttpServer.create() + .httpFormDecoder(builder -> builder.scheduler(scheduler)) + .idleTimeout(Duration.ofMillis(connectTimeoutMillis)) + .readTimeout(Duration.ofMillis(readTimeoutMillis)) + .runOn(sharedGroup.getLowLevelGroup()) + .bindAddress(() -> socketAddress) + .compress(true) + .httpRequestDecoder( + spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt()) + .maxHeaderSize(maxHeaderSize.bytesAsInt()) + .maxInitialLineLength(maxInitialLineLength.bytesAsInt()) + ) + .protocol(HttpProtocol.HTTP11, HttpProtocol.H2C) + .handle((req, res) -> incomingRequest(req, res)) + ); + + disposableServer = server.bindNow(); + return new ReactorNetty4HttpServerChannel(disposableServer.channel()); + } + + private HttpServer configureChannelOptions(final HttpServer server1) { + HttpServer configured = server1.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)) + .childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); + + if (SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)) { + // Netty logs a warning if it can't set the 
option, so try this only on supported platforms + if (IOUtils.LINUX || IOUtils.MAC_OS_X) { + if (SETTING_HTTP_TCP_KEEP_IDLE.get(settings) >= 0) { + final SocketOption<Integer> keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); + if (keepIdleOption != null) { + configured = configured.childOption(NioChannelOption.of(keepIdleOption), SETTING_HTTP_TCP_KEEP_IDLE.get(settings)); + } + } + if (SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) >= 0) { + final SocketOption<Integer> keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); + if (keepIntervalOption != null) { + configured = configured.childOption( + NioChannelOption.of(keepIntervalOption), + SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) + ); + } + } + if (SETTING_HTTP_TCP_KEEP_COUNT.get(settings) >= 0) { + final SocketOption<Integer> keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); + if (keepCountOption != null) { + configured = configured.childOption( + NioChannelOption.of(keepCountOption), + SETTING_HTTP_TCP_KEEP_COUNT.get(settings) + ); + } + } + } + } + + final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + configured = configured.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + configured = configured.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + configured = configured.option(ChannelOption.SO_REUSEADDR, reuseAddress); + configured = configured.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); + + return configured; + } + + /** + * Handles incoming Reactor Netty request + * @param request request instance + * @param response response instance + * @return response publisher + */ + protected Publisher<Void> incomingRequest(HttpServerRequest request, HttpServerResponse response) { + final NonStreamingRequestConsumer<HttpContent> consumer = new NonStreamingRequestConsumer<>( + this, + request, + response, + maxCompositeBufferComponents + ); + + request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer); + + return Mono.from(consumer).flatMap(hc -> { + final FullHttpResponse r = (FullHttpResponse) hc; + response.status(r.status()); + response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue()))); + response.chunkedTransfer(false); + response.compression(true); + r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); + return Mono.from(response.sendObject(r.content())); + }); + } + + /** + * Called to tear down internal resources + */ + @Override + protected void stopInternal() { + if (sharedGroup != null) { + sharedGroup.shutdown(); + sharedGroup = null; + } + + if (scheduler != null) { + scheduler.dispose(); + scheduler = null; + } + + if (disposableServer != null) { + disposableServer.disposeNow(); + disposableServer = null; + } + } + + /** + * Starts the transport + */ + @Override + protected void doStart() { + boolean success = false; + try { + scheduler = Schedulers.newBoundedElastic( + Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE, + Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, + "http-form-decoder" + ); + sharedGroup = sharedGroupFactory.getHttpGroup(); + bindServer(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise
we leak threads since we never moved to started + } + } + } + + @Override + public void onException(HttpChannel channel, Exception cause) { + if (cause instanceof ReadTimeoutException) { + super.onException(channel, new HttpReadTimeoutException(readTimeoutMillis, cause)); + } else { + super.onException(channel, cause); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java new file mode 100644 index 0000000000000..b5ecb0b62f79d --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The new HTTP transport implementations based on Reactor Netty. + */ +package org.opensearch.http.reactor.netty4; diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java new file mode 100644 index 0000000000000..dc310c3793109 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.transport.reactor; + +import org.opensearch.common.SetOnce; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +/** + * The experimental network plugin that introduces new transport implementations based on Reactor Netty. + */ +public class ReactorNetty4Plugin extends Plugin implements NetworkPlugin { + /** + * The name of the new experimental HTTP transport implementation based on Reactor Netty. + */ + public static final String REACTOR_NETTY_HTTP_TRANSPORT_NAME = "reactor-netty4"; + + private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>(); + + /** + * Default constructor + */ + public ReactorNetty4Plugin() {} + + /** + * Returns a list of additional {@link Setting} definitions for this plugin. + */ + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList(/* no settings registered since we're picking the ones from the Netty 4 transport */); + } + + /** + * Returns a map of {@link HttpServerTransport} suppliers.
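+ * The map is keyed by transport name, so a caller could resolve this implementation roughly as follows
+ * (a sketch for illustration only, not the actual {@code NetworkModule} wiring):
+ * <pre>{@code
+ * Supplier<HttpServerTransport> supplier = httpTransports.get(ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME);
+ * HttpServerTransport transport = supplier.get(); // constructs a ReactorNetty4HttpServerTransport
+ * }</pre>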
* See {@link org.opensearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation. + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param pageCacheRecycler page cache recycler instance + * @param circuitBreakerService circuit breaker service instance + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param tracer tracer instance + */ + @Override + public Map<String, Supplier<HttpServerTransport>> getHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.singletonMap( + REACTOR_NETTY_HTTP_TRANSPORT_NAME, + () -> new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + getSharedGroupFactory(settings), + tracer + ) + ); + } + + private SharedGroupFactory getSharedGroupFactory(Settings settings) { + final SharedGroupFactory groupFactory = this.groupFactory.get(); + if (groupFactory != null) { + assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided"; + return groupFactory; + } else { + this.groupFactory.set(new SharedGroupFactory(settings)); + return this.groupFactory.get(); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java new file mode 100644 index 0000000000000..ab7de33c8e673 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.transport.reactor; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport; +import org.opensearch.transport.TcpTransport; +import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.concurrent.Future; + +import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; + +/** + * Creates and returns {@link io.netty.channel.EventLoopGroup} instances. It will return a shared group for + * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if + * {@link org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport#SETTING_HTTP_WORKER_COUNT} is configured to be 0.
+ * If that setting is not 0, then it will return a different group in the {@link #getHttpGroup()} call.
+ */
+public final class SharedGroupFactory {
+
+    private static final Logger logger = LogManager.getLogger(SharedGroupFactory.class);
+
+    private final Settings settings;
+    private final int workerCount;
+    private final int httpWorkerCount;
+
+    private RefCountedGroup genericGroup;
+    private SharedGroup dedicatedHttpGroup;
+
+    /**
+     * Creates a new shared group factory instance from settings
+     * @param settings settings
+     */
+    public SharedGroupFactory(Settings settings) {
+        this.settings = settings;
+        this.workerCount = ReactorNetty4Transport.SETTING_WORKER_COUNT.get(settings);
+        this.httpWorkerCount = ReactorNetty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.get(settings);
+    }
+
+    Settings getSettings() {
+        return settings;
+    }
+
+    /**
+     * Gets the number of configured transport workers
+     * @return the number of configured transport workers
+     */
+    public int getTransportWorkerCount() {
+        return workerCount;
+    }
+
+    /**
+     * Gets the transport shared group
+     * @return transport shared group
+     */
+    public synchronized SharedGroup getTransportGroup() {
+        return getGenericGroup();
+    }
+
+    /**
+     * Gets the HTTP transport shared group
+     * @return HTTP transport shared group
+     */
+    public synchronized SharedGroup getHttpGroup() {
+        if (httpWorkerCount == 0) {
+            return getGenericGroup();
+        } else {
+            if (dedicatedHttpGroup == null) {
+                NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(
+                    httpWorkerCount,
+                    daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)
+                );
+                dedicatedHttpGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup));
+            }
+            return dedicatedHttpGroup;
+        }
+    }
+
+    private SharedGroup getGenericGroup() {
+        if (genericGroup == null) {
+            EventLoopGroup eventLoopGroup = new NioEventLoopGroup(
+                workerCount,
+                daemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX)
+            );
+            this.genericGroup = new RefCountedGroup(eventLoopGroup);
+        } else {
+            genericGroup.incRef();
+        }
+        return new SharedGroup(genericGroup);
+    }
+
+    private static class RefCountedGroup extends AbstractRefCounted {
+
+        public static final String NAME = "ref-counted-event-loop-group";
+        private final EventLoopGroup eventLoopGroup;
+
+        private RefCountedGroup(EventLoopGroup eventLoopGroup) {
+            super(NAME);
+            this.eventLoopGroup = eventLoopGroup;
+        }
+
+        @Override
+        protected void closeInternal() {
+            Future<?> shutdownFuture = eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS);
+            shutdownFuture.awaitUninterruptibly();
+            if (shutdownFuture.isSuccess() == false) {
+                logger.warn("Error closing netty event loop group", shutdownFuture.cause());
+            }
+        }
+    }
+
+    /**
+     * Wraps the {@link RefCountedGroup}. Calls {@link RefCountedGroup#decRef()} on close. After close,
+     * this wrapped instance can no longer be used.
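+     * A typical lifecycle, as a sketch: obtain a group via {@link SharedGroupFactory#getHttpGroup()},
+     * build the server against {@link #getLowLevelGroup()}, and call {@link #shutdown()} exactly once
+     * when the transport stops so that the underlying reference count is released.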
+     */
+    public static class SharedGroup {
+
+        private final RefCountedGroup refCountedGroup;
+
+        private final AtomicBoolean isOpen = new AtomicBoolean(true);
+
+        private SharedGroup(RefCountedGroup refCountedGroup) {
+            this.refCountedGroup = refCountedGroup;
+        }
+
+        /**
+         * Gets Netty's {@link EventLoopGroup} instance
+         * @return Netty's {@link EventLoopGroup} instance
+         */
+        public EventLoopGroup getLowLevelGroup() {
+            return refCountedGroup.eventLoopGroup;
+        }
+
+        /**
+         * Decreases the reference to the underlying {@link EventLoopGroup} instance
+         */
+        public void shutdown() {
+            if (isOpen.compareAndSet(true, false)) {
+                refCountedGroup.decRef();
+            }
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java
new file mode 100644
index 0000000000000..8ec432b7dd5cd
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java
@@ -0,0 +1,142 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.reactor.netty4;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefIterator;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.common.Booleans;
+import org.opensearch.common.concurrent.CompletableContext;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.bytes.BytesReference;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.CompositeByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.NettyRuntime;
+
+/**
+ * Shameless copy of Netty4Utils from the transport-netty4 module
+ */
+public final class Netty4Utils {
+    private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean();
+
+    /**
+     * Utility class
+     */
+    private Netty4Utils() {}
+
+    /**
+     * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools).
+     *
+     * @param availableProcessors the number of available processors
+     * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value
+     */
+    public static void setAvailableProcessors(final int availableProcessors) {
+        // we set this to false in tests to avoid tests that randomly set processors from stepping on each other
+        final boolean set = Booleans.parseBoolean(System.getProperty("opensearch.set.netty.runtime.available.processors", "true"));
+        if (!set) {
+            return;
+        }
+
+        /*
+         * This can be invoked twice, once from Netty4Transport and another time from Netty4HttpServerTransport; however,
+         * NettyRuntime#setAvailableProcessors forbids setting the number of processors twice, so we prevent double invocation here.
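+         * (The AtomicBoolean guard below lets the first caller win; a later caller with a different
+         * value fails fast with an IllegalStateException.)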
+         */
+        if (isAvailableProcessorsSet.compareAndSet(false, true)) {
+            NettyRuntime.setAvailableProcessors(availableProcessors);
+        } else if (availableProcessors != NettyRuntime.availableProcessors()) {
+            /*
+             * We have previously set the available processors, yet either we are trying to set it to a different value now or
+             * there is a bug in Netty and our previous value did not take; bail.
+             */
+            final String message = String.format(
+                Locale.ROOT,
+                "available processors value [%d] did not match current value [%d]",
+                availableProcessors,
+                NettyRuntime.availableProcessors()
+            );
+            throw new IllegalStateException(message);
+        }
+    }
+
+    /**
+     * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal
+     * pages of the BytesReference. Do not release the bytes of the reference before the ByteBuf goes out of scope.
+     * @param reference reference to convert
+     */
+    public static ByteBuf toByteBuf(final BytesReference reference) {
+        if (reference.length() == 0) {
+            return Unpooled.EMPTY_BUFFER;
+        }
+        final BytesRefIterator iterator = reference.iterator();
+        // usually we have one, two, or three components from the header, the message, and a buffer
+        final List<ByteBuf> buffers = new ArrayList<>(3);
+        try {
+            BytesRef slice;
+            while ((slice = iterator.next()) != null) {
+                buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length));
+            }
+
+            if (buffers.size() == 1) {
+                return buffers.get(0);
+            } else {
+                CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size());
+                composite.addComponents(true, buffers);
+                return composite;
+            }
+        } catch (IOException ex) {
+            throw new AssertionError("no IO happens here", ex);
+        }
+    }
+
+    /**
+     * Wraps the given ByteBuf with a BytesReference
+     * @param buffer buffer to convert
+     */
+    public static BytesReference toBytesReference(final ByteBuf buffer) {
+        final int readableBytes = buffer.readableBytes();
+        if (readableBytes == 0) {
+            return BytesArray.EMPTY;
+        } else if (buffer.hasArray()) {
+            return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), readableBytes);
+        } else {
+            final ByteBuffer[] byteBuffers = buffer.nioBuffers();
+            return BytesReference.fromByteBuffers(byteBuffers);
+        }
+    }
+
+    /**
+     * Adds a completion listener to a ChannelFuture
+     * @param channelFuture ChannelFuture to add the listener to
+     * @param context completion listener context
+     */
+    public static void addListener(ChannelFuture channelFuture, CompletableContext<Void> context) {
+        channelFuture.addListener(f -> {
+            if (f.isSuccess()) {
+                context.complete(null);
+            } else {
+                Throwable cause = f.cause();
+                if (cause instanceof Error) {
+                    ExceptionsHelper.maybeDieOnAnotherThread(cause);
+                    context.completeExceptionally(new Exception(cause));
+                } else {
+                    context.completeExceptionally((Exception) cause);
+                }
+            }
+        });
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java
new file mode 100644
index 0000000000000..b3e92f58c540a
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.reactor.netty4;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+
+import reactor.netty.tcp.TcpServer;
+
+/**
+ * The transport implementations based on Reactor Netty (please see {@link TcpServer}).
+ */
+public class ReactorNetty4Transport {
+    /**
+     * The number of Netty workers
+     */
+    public static final Setting<Integer> SETTING_WORKER_COUNT = new Setting<>(
+        "transport.netty.worker_count",
+        (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)),
+        (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"),
+        Property.NodeScope
+    );
+
+    /**
+     * Default constructor
+     */
+    public ReactorNetty4Transport() {}
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java
new file mode 100644
index 0000000000000..921bca104c6fe
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The new transport implementations based on Reactor Netty.
+ */
+package org.opensearch.transport.reactor.netty4;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java
new file mode 100644
index 0000000000000..2f36ebb7f11f8
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The experimental network plugin that introduces new transport implementations based on Reactor Netty.
+ */
+package org.opensearch.transport.reactor;
diff --git a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..4f2dcde995338
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; + + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; + + // Netty sets custom classloader for some of its internal threads + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! + permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java new file mode 100644 index 0000000000000..443ecd0f40ead --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -0,0 +1,208 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.tasks.Task; + +import java.io.Closeable; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.resolver.DefaultAddressResolverGroup; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.publisher.ParallelFlux; +import reactor.netty.http.client.HttpClient; + +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/** + * Tiny helper to send http requests over netty. 
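+ * Example usage, as a sketch ({@code remoteAddress} stands in for the server's bound address, and
+ * callers are responsible for releasing the returned responses):
+ * <pre>{@code
+ * try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ *     List<FullHttpResponse> responses = client.get(remoteAddress, "/_cluster/health");
+ * }
+ * }</pre>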
+ */
+class ReactorHttpClient implements Closeable {
+    private final boolean compression;
+
+    static Collection<String> returnHttpResponseBodies(Collection<FullHttpResponse> responses) {
+        List<String> list = new ArrayList<>(responses.size());
+        for (FullHttpResponse response : responses) {
+            list.add(response.content().toString(StandardCharsets.UTF_8));
+        }
+        return list;
+    }
+
+    static Collection<String> returnOpaqueIds(Collection<FullHttpResponse> responses) {
+        List<String> list = new ArrayList<>(responses.size());
+        for (HttpResponse response : responses) {
+            list.add(response.headers().get(Task.X_OPAQUE_ID));
+        }
+        return list;
+    }
+
+    ReactorHttpClient(boolean compression) {
+        this.compression = compression;
+    }
+
+    static ReactorHttpClient create() {
+        return create(true);
+    }
+
+    static ReactorHttpClient create(boolean compression) {
+        return new ReactorHttpClient(compression);
+    }
+
+    public List<FullHttpResponse> get(InetSocketAddress remoteAddress, String... uris) throws InterruptedException {
+        return get(remoteAddress, false, uris);
+    }
+
+    public List<FullHttpResponse> get(InetSocketAddress remoteAddress, boolean ordered, String... uris) throws InterruptedException {
+        final List<FullHttpRequest> requests = new ArrayList<>(uris.length);
+
+        for (int i = 0; i < uris.length; i++) {
+            final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]);
+            httpRequest.headers().add(HOST, "localhost");
+            httpRequest.headers().add("X-Opaque-ID", String.valueOf(i));
+            httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+            requests.add(httpRequest);
+        }
+
+        return sendRequests(remoteAddress, requests, ordered);
+    }
+
+    public final Collection<FullHttpResponse> post(InetSocketAddress remoteAddress, List<Tuple<String, CharSequence>> urisAndBodies)
+        throws InterruptedException {
+        return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies);
+    }
+
+    public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException {
+        final List<FullHttpResponse> responses = sendRequests(remoteAddress, Collections.singleton(httpRequest), false);
+        assert responses.size() == 1 : "expected 1 and only 1 http response";
+        return responses.get(0);
+    }
+
+    public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest, HttpContent content)
+        throws InterruptedException {
+        final List<FullHttpResponse> responses = sendRequests(
+            remoteAddress,
+            Collections.singleton(
+                new DefaultFullHttpRequest(
+                    httpRequest.protocolVersion(),
+                    httpRequest.method(),
+                    httpRequest.uri(),
+                    content.content(),
+                    httpRequest.headers(),
+                    httpRequest.trailingHeaders()
+                )
+            ),
+            false
+        );
+        assert responses.size() == 1 : "expected 1 and only 1 http response";
+        return responses.get(0);
+    }
+
+    public final Collection<FullHttpResponse> put(InetSocketAddress remoteAddress, List<Tuple<String, CharSequence>> urisAndBodies)
+        throws InterruptedException {
+        return processRequestsWithBody(HttpMethod.PUT, remoteAddress, urisAndBodies);
+    }
+
+    private List<FullHttpResponse> processRequestsWithBody(
+        HttpMethod method,
+        InetSocketAddress remoteAddress,
+        List<Tuple<String, CharSequence>> urisAndBodies
+    ) throws InterruptedException {
+        List<FullHttpRequest> requests = new ArrayList<>(urisAndBodies.size());
+        for (int i = 0; i < urisAndBodies.size(); ++i) {
+            final Tuple<String, CharSequence> uriAndBody = urisAndBodies.get(i);
+            ByteBuf content = Unpooled.copiedBuffer(uriAndBody.v2(), StandardCharsets.UTF_8);
+            FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, method, uriAndBody.v1(), content);
+            request.headers().add(HttpHeaderNames.HOST, "localhost");
+            request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes());
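+            // identify the payload, and mark the scheme consumed if the headers are converted to HTTP/2 form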
+            request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json");
+            request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+            request.headers().add("X-Opaque-ID", String.valueOf(i));
+            requests.add(request);
+        }
+        return sendRequests(remoteAddress, requests, false);
+    }
+
+    private List<FullHttpResponse> sendRequests(
+        final InetSocketAddress remoteAddress,
+        final Collection<FullHttpRequest> requests,
+        boolean ordered
+    ) {
+        final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1);
+        try {
+            final HttpClient client = HttpClient.newConnection()
+                .resolver(DefaultAddressResolverGroup.INSTANCE)
+                .runOn(eventLoopGroup)
+                .host(remoteAddress.getHostString())
+                .port(remoteAddress.getPort())
+                .compress(compression);
+
+            @SuppressWarnings("unchecked")
+            final Mono<FullHttpResponse>[] monos = requests.stream()
+                .map(
+                    request -> client.headers(h -> h.add(request.headers()))
+                        .baseUrl(request.getUri())
+                        .request(request.method())
+                        .send(Mono.fromSupplier(() -> request.content()))
+                        .responseSingle(
+                            (r, body) -> body.switchIfEmpty(Mono.just(Unpooled.EMPTY_BUFFER))
+                                .map(
+                                    b -> new DefaultFullHttpResponse(
+                                        r.version(),
+                                        r.status(),
+                                        b.retain(),
+                                        r.responseHeaders(),
+                                        EmptyHttpHeaders.INSTANCE
+                                    )
+                                )
+                        )
+                )
+                .toArray(Mono[]::new);
+
+            if (ordered == false) {
+                return ParallelFlux.from(monos).sequential().collectList().block();
+            } else {
+                return Flux.concat(monos).flatMapSequential(r -> Mono.just(r)).collectList().block();
+            }
+        } finally {
+            eventLoopGroup.shutdownGracefully().awaitUninterruptibly();
+        }
+    }
+
+    @Override
+    public void close() {
+
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
new file mode 100644
index 0000000000000..00ca378a4e46b
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
@@ -0,0 +1,122 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ReactorNetty4BadRequestTests extends OpenSearchTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new OpenSearchException(cause);
+                    channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e));
+                } catch (final IOException e) {
+                    throw new UncheckedIOException(e);
+                }
+            }
+        };
+
+        Settings settings = Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()).build();
+        try (
+            HttpServerTransport httpServerTransport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+                new SharedGroupFactory(Settings.EMPTY),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            httpServerTransport.start();
+            final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create()) {
+                final List<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%");
+
+                try {
+                    assertThat(responses, hasSize(1));
+                    final FullHttpResponse response = responses.get(0);
+                    assertThat(response.status().code(), equalTo(400));
+                    final Collection<String> responseBodies = ReactorHttpClient.returnHttpResponseBodies(responses);
+                    assertThat(responseBodies, hasSize(1));
+                    final String body = responseBodies.iterator().next();
+                    assertThat(body, containsString("\"type\":\"bad_parameter_exception\""));
+                    assertThat(
+                        body,
+                        containsString("\"reason\":\"java.lang.IllegalArgumentException: partial escape sequence at end of string: %/\"")
+                    );
+                } finally {
+                    responses.forEach(ReferenceCounted::release);
+                }
+            }
+        }
+    }
+
+}
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java
new file mode 100644
index 0000000000000..15a5b04c802a4
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java
@@ -0,0 +1,579 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocatorMetric; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; + +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link ReactorNetty4HttpServerTransport} class. 
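+ * Each test starts a real transport bound to a port from {@code getPortRange()} and, for the most
+ * part, drives it with {@link ReactorHttpClient}.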
+ */ +public class ReactorNetty4HttpServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = createSettings(); + final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to an unsupported expectation with a 417 status. 
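+     * (Any {@code Expect} value other than {@code 100-continue}, such as the {@code chocolate=yummy} used below, is unsupported.)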
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + // Reactor Netty 4 does not expose 100 CONTINUE response but instead just asks for content + final HttpContent continuationRequest = new DefaultHttpContent(Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), request, continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } finally { + continuationResponse.release(); + } + } + } + } + + public void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + initialSettings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + ReactorNetty4HttpServerTransport otherTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void 
testBadRequest() throws InterruptedException {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+                throw new AssertionError();
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error("--> Unexpected bad request");
+                throw new AssertionError(cause);
+            }
+        };
+
+        final Settings settings;
+        final int maxInitialLineLength;
+        final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+        if (randomBoolean()) {
+            maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
+            settings = createSettings();
+        } else {
+            maxInitialLineLength = randomIntBetween(1, 8192);
+            settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build();
+        }
+
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                clusterSettings,
+                new SharedGroupFactory(settings),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                final String url = "/" + randomAlphaOfLength(maxInitialLineLength);
+                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(response.status(), equalTo(HttpResponseStatus.REQUEST_URI_TOO_LONG));
+                    assertThat(response.content().array().length, equalTo(0));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
+    public void testDispatchFailed() throws InterruptedException {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                throw new RuntimeException("Bad things happen");
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error("--> Unexpected bad request");
+                throw new AssertionError(cause);
+            }
+        };
+
+        final Settings settings = createSettings();
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                clusterSettings,
+                new SharedGroupFactory(settings),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
+
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(response.status(), equalTo(HttpResponseStatus.INTERNAL_SERVER_ERROR));
+                    assertThat(response.content().array().length, equalTo(0));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
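+    /**
+     * Verifies that a large (4 MB) compressed response arrives intact without triggering additional
+     * "huge" pooled-buffer allocations (tracked by {@code getHugeAllocationCount()} below).
+     */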
+    public void testLargeCompressedResponse() throws InterruptedException {
+        final String responseString = randomAlphaOfLength(4 * 1024 * 1024);
+        final String url = "/thing/";
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                if (url.equals(request.uri())) {
+                    channel.sendResponse(new BytesRestResponse(OK, responseString));
+                } else {
+                    logger.error("--> Unexpected successful uri [{}]", request.uri());
+                    throw new AssertionError();
+                }
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error(
+                    new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+                    cause
+                );
+                throw new AssertionError();
+            }
+
+        };
+
+        try (
+            ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+                Settings.EMPTY,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry(),
+                dispatcher,
+                clusterSettings,
+                new SharedGroupFactory(Settings.EMPTY),
+                NoopTracer.INSTANCE
+            )
+        ) {
+            transport.start();
+            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+            try (ReactorHttpClient client = ReactorHttpClient.create()) {
+                DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+                request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip"));
+                long numOfHugeAllocations = getHugeAllocationCount();
+                final FullHttpResponse response = client.send(remoteAddress.address(), request);
+                try {
+                    assertThat(getHugeAllocationCount(), equalTo(numOfHugeAllocations));
+                    assertThat(response.status(), equalTo(HttpResponseStatus.OK));
+                    byte[] bytes = new byte[response.content().readableBytes()];
+                    response.content().readBytes(bytes);
+                    assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString));
+                } finally {
+                    response.release();
+                }
+            }
+        }
+    }
+
+    private long getHugeAllocationCount() {
+        long numOfHugeAllocations = 0;
+        ByteBufAllocator allocator = NettyAllocator.getAllocator();
+        assert allocator instanceof NettyAllocator.NoDirectBuffers;
+        ByteBufAllocator delegate = ((NettyAllocator.NoDirectBuffers) allocator).getDelegate();
+        if (delegate instanceof PooledByteBufAllocator) {
+            PooledByteBufAllocatorMetric metric = ((PooledByteBufAllocator) delegate).metric();
+            numOfHugeAllocations = metric.heapArenas().stream().mapToLong(PoolArenaMetric::numHugeAllocations).sum();
+        }
+        return numOfHugeAllocations;
+    }
+
+    public void testCorsRequest() throws InterruptedException {
+        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+            @Override
+            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+                throw new AssertionError();
+            }
+
+            @Override
+            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+                logger.error(
+                    new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+                    cause
+                );
+                throw new AssertionError();
+            }
+
+        };
+
+        final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true)
.put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") + .build(); + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "google.com"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + + public void testConnectTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + NioEventLoopGroup group = new NioEventLoopGroup(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + final CountDownLatch channelClosedLatch = new CountDownLatch(1); + + final Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .channel(NioSocketChannel.class) + .handler(new 
ChannelInitializer<SocketChannel>() {
+
+                    @Override
+                    protected void initChannel(SocketChannel ch) {
+                        ch.pipeline().addLast(new ChannelHandlerAdapter() {
+                        });
+
+                    }
+                })
+                .group(group);
+            ChannelFuture connect = clientBootstrap.connect(remoteAddress.address());
+            connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown());
+
+            assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES));
+
+        } finally {
+            group.shutdownGracefully().await();
+        }
+    }
+
+    private Settings createSettings() {
+        return createBuilderWithPort().build();
+    }
+
+    private Settings.Builder createBuilderWithPort() {
+        return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange());
+    }
+}
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml
new file mode 100644
index 0000000000000..e669016cad98a
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml
@@ -0,0 +1,16 @@
+---
+"Insert Document with geoshape field":
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_old", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_old", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_old
+  - match: { hits.total: 2 }
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml
new file mode 100644
index 0000000000000..30a39447905c0
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml
@@ -0,0 +1,28 @@
+---
+"Create index with Geoshape field":
+  - do:
+      indices.create:
+        index: geo_shape_index_old
+        body:
+          settings:
+            index:
+              number_of_replicas: 2
+          mappings:
+            "properties":
+              "location":
+                "type": "geo_shape"
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_old", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_old", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: geo_shape_index_old
+  - match: { hits.total: 2 }
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml
new file mode 100644
index 0000000000000..4c7b12a7f1909
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml
@@ -0,0 +1,61 @@
+---
+"Validate we are able to index documents after upgrade":
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"index": {"_index": "geo_shape_index_old", "_id":191}}'
+          - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}'
+          - '{"index": {"_index": "geo_shape_index_old", "_id":219}}'
+          - '{"name": "NEMO Science Museum","location": {"type": 
"envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } + + +--- +"Create index with Geoshape field in new cluster": + - do: + indices.create: + index: geo_shape_index_new + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "location": + "type": "geo_shape" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_new", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_new", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_new + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_new + body: + aggregations: + myaggregation: + geo_bounds: + field: "location" + - match: { hits.total: 2 } + - match: { aggregations.myaggregation.bounds.top_left.lat: 0.9999999823048711 } + - match: { aggregations.myaggregation.bounds.top_left.lon: 99.99999999068677 } + - match: { aggregations.myaggregation.bounds.bottom_right.lat: 0.0 } + - match: { aggregations.myaggregation.bounds.bottom_right.lon: 105.99999996833503 } diff --git a/server/build.gradle b/server/build.gradle index c56f9d5aa288f..fa8a44ef6fc94 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -154,6 +154,10 @@ dependencies { // jcraft api "com.jcraft:jzlib:${versions.jzlib}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + // protobuf api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" @@ -366,11 +370,13 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', 'com.google.protobuf.UnsafeUtil$MemoryAccessor', 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', - 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1', + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' ) } tasks.named("dependencyLicenses").configure { + mapping from: /reactor-.*/, to: 'reactor' mapping from: /lucene-.*/, to: 'lucene' dependencies = project.configurations.runtimeClasspath.fileCollection { it.group.startsWith('org.opensearch') == false || diff --git a/plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 b/server/licenses/reactive-streams-1.0.4.jar.sha1 similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 rename to server/licenses/reactive-streams-1.0.4.jar.sha1 diff --git a/plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt b/server/licenses/reactive-streams-LICENSE.txt similarity index 100% rename from plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt rename to server/licenses/reactive-streams-LICENSE.txt diff --git a/plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactive-streams-NOTICE.txt similarity index 100% rename from plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt rename to server/licenses/reactive-streams-NOTICE.txt diff --git a/server/licenses/reactor-LICENSE.txt b/server/licenses/reactor-LICENSE.txt new file mode 100644 
index 0000000000000..e5583c184e67a --- /dev/null +++ b/server/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-azure/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactor-NOTICE.txt similarity index 100% rename from plugins/repository-azure/licenses/reactive-streams-NOTICE.txt rename to server/licenses/reactor-NOTICE.txt diff --git a/server/licenses/reactor-core-3.5.11.jar.sha1 b/server/licenses/reactor-core-3.5.11.jar.sha1 new file mode 100644 index 0000000000000..e5ffdbc8a7840 --- /dev/null +++ b/server/licenses/reactor-core-3.5.11.jar.sha1 @@ -0,0 +1 @@ +db2299757f562261eb775d13658e86ff06f91e8a \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index 186a5ce39f131..a82fd8d845709 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -19,7 +19,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -124,30 +123,4 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } - public void testIndexReplicationTypeWhenRestrictSettingTrue() { - testRestrictIndexReplicationTypeSetting(true, randomFrom(ReplicationType.values())); - } - - public void testIndexReplicationTypeWhenRestrictSettingFalse() { - testRestrictIndexReplicationTypeSetting(false, randomFrom(ReplicationType.values())); - } - - private void testRestrictIndexReplicationTypeSetting(boolean setRestrict, ReplicationType replicationType) { - String expectedExceptionMsg = - "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true];"; - String clusterManagerName = internalCluster().startNode( - Settings.builder().put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), setRestrict).build() - ); - internalCluster().startDataOnlyNodes(1); - - // Test create index fails - Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, replicationType).build(); - if (setRestrict) { - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); - assertEquals(expectedExceptionMsg, exception.getMessage()); - } else { - createIndex(INDEX_NAME, indexSettings); - } - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java 
b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index a2996d87a851b..9c93a8f85db8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -1066,9 +1066,14 @@ public void testScrollCreatedOnReplica() throws Exception { client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up post scroll clear request", currentFiles.containsAll(snapshottedSegments)); + assertBusy( + () -> assertFalse( + "Files should be cleaned up post scroll clear request", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); assertEquals(100, scrollHits); + } /** @@ -1327,9 +1332,12 @@ public void testPitCreatedOnReplica() throws Exception { // delete the PIT DeletePitRequest deletePITRequest = new DeletePitRequest(pitResponse.getId()); client().execute(DeletePitAction.INSTANCE, deletePITRequest).actionGet(); - - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments)); + assertBusy( + () -> assertFalse( + "Files should be cleaned up", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 865dd670fbf68..1d5f7f93e7410 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -33,9 +33,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; @@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -221,10 +222,6 @@ public void testNestedDiversity() throws Exception { } public void testNestedSamples() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); // Test samples nested under samples int MAX_DOCS_PER_AUTHOR = 1; int MAX_DOCS_PER_GENRE = 2; diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index 7033c42c5d661..c7b03d21cb6bb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -34,9 +34,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; @@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -195,6 +196,23 @@ public void testSimpleSampler() throws Exception { assertThat(maxBooksPerAuthor, equalTo(3L)); } + public void testSimpleSamplerShardSize() throws Exception { + final int SHARD_SIZE = randomIntBetween(1, 3); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(SHARD_SIZE); + sampleAgg.subAggregation(terms("authors").field("author")); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); + assertSearchResponse(response); + Sampler sample = response.getAggregations().get("sample"); + Terms authors = sample.getAggregations().get("authors"); + assertEquals(SHARD_SIZE * NUM_SHARDS, sample.getDocCount()); + } + public void testUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index 6b95405b3ebd4..f5d1b8234558e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -244,10 +244,6 @@ public void testWithIndexAlias() { } public void testWithIndexFilter() throws InterruptedException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10433", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword")); assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 83dec7b27a897..656e7b2e366ed 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -293,6 +293,7 @@ public void testMultiNested() throws Exception { refresh(); // check the numDocs assertDocumentCount("test", 7); + indexRandomForConcurrentSearch("test"); // do some multi nested queries SearchResponse searchResponse = client().prepareSearch("test") @@ -485,6 +486,7 @@ public void testExplain() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) @@ -968,6 +970,10 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11065", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)) .setMapping( @@ -1035,6 +1041,7 @@ public void testLeakingSortValues() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(termQuery("_id", 2)) @@ -1627,6 +1634,7 @@ public void testCheckFixedBitSetCache() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); ensureSearchable("test"); + indexRandomForConcurrentSearch("test"); // No nested mapping yet, there shouldn't be anything in the fixed bit set cache ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index e42f12709c948..a3432bfe7e3e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -100,6 +100,7 @@ public void clearIndex() { public void testPit() throws Exception { CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + indexRandomForConcurrentSearch("index"); ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); SearchResponse searchResponse = client().prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 425764b1c88d2..97fe05f5b9747 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -52,7 +52,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import 
org.opensearch.test.ParameterizedOpenSearchIntegTestCase; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -99,7 +98,7 @@ public Settings nodeSettings(int nodeOrdinal) { } // see #2896 - public void testStopOneNodePreferenceWithRedState() throws IOException { + public void testStopOneNodePreferenceWithRedState() throws Exception { assertAcked( prepareCreate("test").setSettings( Settings.builder().put("index.number_of_shards", cluster().numDataNodes() + 2).put("index.number_of_replicas", 0) @@ -110,6 +109,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); String[] preferences = new String[] { @@ -138,7 +138,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); } - public void testNoPreferenceRandom() { + public void testNoPreferenceRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -149,6 +149,7 @@ public void testNoPreferenceRandom() { client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -201,7 +202,7 @@ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() { } } - public void testNodesOnlyRandom() { + public void testNodesOnlyRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -211,6 +212,7 @@ public void testNodesOnlyRandom() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); // multiple wildchar to cover multi-param usecase diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 099eb934f4f4d..1ca5859f23bca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -186,6 +186,7 @@ public void testDocWithAllTypes() throws Exception { String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -225,6 +226,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + 
indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -245,6 +247,7 @@ public void testRegexCaseInsensitivity() throws Exception { indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake")); indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("messages"); SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get(); assertNoFailures(response); @@ -282,6 +285,7 @@ public void testAllFields() throws Exception { List reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test_1"); SearchResponse resp = client().prepareSearch("test_1") .setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)) @@ -374,6 +378,7 @@ public void testLimitOnExpandedFields() throws Exception { client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("testindex"); // single field shouldn't trigger the limit doAssertOneHitForQueryString("field_A0:foo"); @@ -465,6 +470,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { List indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. 
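The test changes above all follow one pattern: once the documents a test depends on are indexed and refreshed, indexRandomForConcurrentSearch(...) is called so the concurrent segment search path runs against a multi-segment index before any assertion. A minimal sketch of that pattern, assuming the OpenSearchIntegTestCase-style helpers used in these files (client(), refresh(), indexRandomForConcurrentSearch(), assertHitCount()):

    // Hypothetical test illustrating the preparation pattern added throughout this PR.
    public void testQueryVisibleToConcurrentSearch() throws Exception {
        client().prepareIndex("test").setId("1").setSource("f1", "foo bar").get();
        refresh();
        // Index extra random documents so the concurrent path searches several segments.
        indexRandomForConcurrentSearch("test");
        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
        assertHitCount(resp, 1L);
    }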
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index 7ba582811bbc2..55029712a061c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -109,13 +109,14 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { // 1) only matched docs retrieved // 2) score is calculated based on a script with params // 3) min score applied - public void testScriptScore() { + public void testScriptScore() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); @@ -135,13 +136,14 @@ public void testScriptScore() { assertOrderedSearchHits(resp, "10", "8", "6"); } - public void testScriptScoreBoolQuery() { + public void testScriptScoreBoolQuery() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); @@ -155,7 +157,7 @@ public void testScriptScoreBoolQuery() { } // test that when the internal query is rewritten script_score works well - public void testRewrittenQuery() { + public void testRewrittenQuery() throws Exception { assertAcked( prepareCreate("test-index2").setSettings(Settings.builder().put("index.number_of_shards", 1)) .setMapping("field1", "type=date", "field2", "type=double") @@ -164,6 +166,7 @@ public void testRewrittenQuery() { client().prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); client().prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); refresh(); + indexRandomForConcurrentSearch("test-index2"); RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Map<String, Object> params = new HashMap<>(); @@ -174,7 +177,7 @@ public void testRewrittenQuery() { assertOrderedSearchHits(resp, "3", "2", "1"); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws Exception { try { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; @@ -182,6 +185,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 384d2b7423e66..017d28ef3a2a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++
b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -150,6 +150,7 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar")).get(); assertHitCount(searchResponse, 3L); @@ -199,6 +200,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("3").setSource("body", "foo bar"), client().prepareIndex("test").setId("4").setSource("body", "foo baz bar") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 1"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get(); @@ -235,6 +237,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"), client().prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 5"); searchResponse = client().prepareSearch() @@ -256,7 +259,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { assertSearchHits(searchResponse, "6", "7", "8"); } - public void testNestedFieldSimpleQueryString() throws IOException { + public void testNestedFieldSimpleQueryString() throws Exception { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -275,6 +278,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { ); client().prepareIndex("test").setId("1").setSource("body", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")).get(); assertHitCount(searchResponse, 1L); @@ -359,6 +363,8 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte client().prepareIndex("test2").setId("10").setSource("field", 5) ); refresh(); + indexRandomForConcurrentSearch("test1"); + indexRandomForConcurrentSearch("test2"); SearchResponse searchResponse = client().prepareSearch() .setAllowPartialSearchResults(true) @@ -419,6 +425,7 @@ public void testSimpleQueryStringUsesFieldAnalyzer() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("123").field("foo").field("bar")).get(); assertHitCount(searchResponse, 1L); @@ -469,6 +476,7 @@ public void testBasicAllQuery() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHitCount(resp, 2L); @@ -492,6 +500,7 @@ public void testWithDate() throws Exception { reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", 
"2015/09/01")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -523,6 +532,7 @@ public void testWithLotsOfTypes() throws Exception { client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") ); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -550,6 +560,7 @@ public void testDocWithAllTypes() throws Exception { String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -596,6 +607,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -663,6 +675,7 @@ public void testFieldAlias() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get(); @@ -681,6 +694,7 @@ public void testFieldAliasWithWildcardField() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get(); @@ -697,6 +711,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { List indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and boolean field. // By default, the boolean field should be ignored when building the query. 
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 78a22fe11f072..8d76a39712ee3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1252,7 +1252,6 @@ List<String> getIndexSettingsValidationErrors( if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } - validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); if (indexName.isEmpty() || indexName.get().charAt(0) != '.') { // Apply aware replica balance validation only to non system indices int replicaCount = settings.getAsInt( @@ -1307,24 +1306,6 @@ private static List<String> validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } - /** - * Validates {@code index.replication.type} is not set if {@code cluster.restrict.index.replication_type} is set to true. - * - * @param requestSettings settings passed in during index create request - * @param clusterSettings cluster setting - */ - private static Optional<String> validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { - if (requestSettings.hasValue(SETTING_REPLICATION_TYPE) - && clusterSettings.get(IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING)) { - return Optional.of( - "index setting [index.replication.type] is not allowed to be set as [" - + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey() - + "=true]" - ); - } - return Optional.empty(); - } - /** * Validates the settings and mappings for shrinking an index.
* diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 3a1fff21db366..2bb81064c9c71 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -348,6 +348,7 @@ public void apply(Settings value, Settings current, Settings previous) { HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT, + HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT, HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, HttpTransportSettings.OLD_SETTING_HTTP_TCP_NO_DELAY, HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY, @@ -691,8 +692,7 @@ public void apply(Settings value, Settings current, Settings previous) { AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, - CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, - IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING + CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT ) ) ); diff --git a/server/src/main/java/org/opensearch/http/HttpTransportSettings.java b/server/src/main/java/org/opensearch/http/HttpTransportSettings.java index f16f06f414e28..621ef36692178 100644 --- a/server/src/main/java/org/opensearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/opensearch/http/HttpTransportSettings.java @@ -182,6 +182,14 @@ public final class HttpTransportSettings { Property.NodeScope ); + // A default of 0 means there is no connect timeout + public static final Setting<TimeValue> SETTING_HTTP_CONNECT_TIMEOUT = Setting.timeSetting( + "http.connect_timeout", + new TimeValue(0), + new TimeValue(0), + Property.NodeScope + ); + // Tcp socket settings public static final Setting<Boolean> OLD_SETTING_HTTP_TCP_NO_DELAY = boolSetting( diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java index 4a4b2684b5f4c..b44b4b75549c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java @@ -31,12 +31,15 @@ package org.opensearch.index.mapper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.LatLonShape; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; +import org.opensearch.Version; import org.opensearch.common.Explicit; import org.opensearch.common.geo.GeometryParser; import org.opensearch.common.geo.ShapeRelation; @@ -77,6 +80,7 @@ * @opensearch.internal */ public class GeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper<Geometry> { + private static final Logger logger = LogManager.getLogger(GeoShapeFieldMapper.class); public static final String CONTENT_TYPE = "geo_shape"; public static final FieldType FIELD_TYPE = new FieldType(); static { @@ -205,9 +209,24 @@ protected void addDocValuesFields( final List<IndexableField> indexableFields, final ParseContext context ) { - Field[] fieldsArray = new Field[indexableFields.size()]; -
fieldsArray = indexableFields.toArray(fieldsArray); - context.doc().add(LatLonShape.createDocValueField(name, fieldsArray)); + /* + * Doc values for GeoShape fields are added only when the index was created on OpenSearch 2.9 or later. + * Without this version check, documents with GeoShape fields cannot be indexed after an upgrade. + * GitHub issues: https://github.com/opensearch-project/OpenSearch/issues/10958, + * https://github.com/opensearch-project/OpenSearch/issues/10795 + */ + if (context.indexSettings().getIndexVersionCreated().onOrAfter(Version.V_2_9_0)) { + Field[] fieldsArray = new Field[indexableFields.size()]; + fieldsArray = indexableFields.toArray(fieldsArray); + context.doc().add(LatLonShape.createDocValueField(name, fieldsArray)); + } else { + logger.warn( + "The index was created with version {}; geoshape doc values require an index created with " + + "OpenSearch version {} or later", + context.indexSettings().getIndexVersionCreated(), + Version.V_2_9_0 + ); + } } @Override diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 36abc77893d81..50c551c2be29b 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -299,17 +299,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * This setting is used to restrict creation of index where the 'index.replication.type' index setting is set. - * If disabled, the replication type can be specified. - */ - public static final Setting<Boolean> CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING = Setting.boolSetting( - "cluster.restrict.index.replication_type", - false, - Property.NodeScope, - Property.Final - ); - /** * The node's settings. */
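The version gate added to addDocValuesFields reduces to one small, reusable check: read the version that created the index from its settings and branch on onOrAfter. A hedged sketch using only the APIs visible in the hunk above (the helper name is illustrative, not part of this PR):

    // Illustrative helper: indices created before 2.9 lack codec support for
    // geoshape doc values, so the field is skipped instead of failing indexing.
    private static boolean supportsGeoShapeDocValues(ParseContext context) {
        return context.indexSettings().getIndexVersionCreated().onOrAfter(Version.V_2_9_0);
    }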
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 73da0482537ad..cb738d74000bc 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -70,7 +70,7 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final ReplicationCollection<SegmentReplicationTarget> onGoingReplications; - private final Map<ShardId, SegmentReplicationTarget> completedReplications = ConcurrentCollections.newConcurrentMap(); + private final Map<ShardId, SegmentReplicationState> completedReplications = ConcurrentCollections.newConcurrentMap(); private final SegmentReplicationSourceFactory sourceFactory; @@ -192,7 +192,7 @@ public SegmentReplicationState getOngoingEventSegmentReplicationState(ShardId sh */ @Nullable public SegmentReplicationState getlatestCompletedEventSegmentReplicationState(ShardId shardId) { - return Optional.ofNullable(completedReplications.get(shardId)).map(SegmentReplicationTarget::state).orElse(null); + return completedReplications.get(shardId); } /** @@ -525,7 +525,7 @@ public void onResponse(Void o) { logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { - completedReplications.put(target.shardId(), target); + completedReplications.put(target.shardId(), target.state()); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 5f81c76b69385..0f3c9872353c1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -162,6 +162,6 @@ public InternalAggregation buildEmptyAggregation() { @Override protected boolean supportsConcurrentSegmentSearch() { - return true; + return false; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java index d3db8a66ee21f..51d9830d3cea0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java @@ -75,6 +75,6 @@ public Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - return true; + return false; } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cace66d8c6d9e..e40826915c848 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -139,7 +139,6 @@ import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; import static
org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -1178,8 +1177,6 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) - .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), true) - .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getSettings()).thenReturn(settings); @@ -1203,12 +1200,8 @@ public void testvalidateIndexSettings() { ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); - assertThat(validationErrors.size(), is(2)); - assertThat( - validationErrors.get(0), - is("index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true]") - ); - assertThat(validationErrors.get(1), is("expected total copies needs to be a multiple of total awareness attributes [3]")); + assertThat(validationErrors.size(), is(1)); + assertThat(validationErrors.get(0), is("expected total copies needs to be a multiple of total awareness attributes [3]")); settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -1216,13 +1209,8 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_NUMBER_OF_REPLICAS, 2) - .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), false) - .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - when(clusterService.getClusterSettings()).thenReturn(clusterSettings); - validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); assertThat(validationErrors.size(), is(0)); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 20cec90d79e3e..dd92bfb47afdb 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -32,6 +32,7 @@ import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.CorruptionUtils; +import org.opensearch.test.junit.annotations.TestLogging; import 
org.hamcrest.MatcherAssert; import org.junit.Assert; @@ -297,31 +298,39 @@ public void testPrimaryRestart_PrimaryHasExtraCommits() throws Exception { } } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testRepicaCleansUpOldCommitsWhenReceivingNew() throws Exception { final Path remotePath = createTempDir(); try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), remotePath)) { shards.startAll(); final IndexShard primary = shards.getPrimary(); final IndexShard replica = shards.getReplicas().get(0); + final Store store = replica.store(); + final SegmentInfos initialCommit = store.readLastCommittedSegmentsInfo(); shards.indexDocs(1); flushShard(primary); replicateSegments(primary, shards.getReplicas()); + assertDocCount(primary, 1); assertDocCount(replica, 1); - assertEquals("segments_5", replica.store().readLastCommittedSegmentsInfo().getSegmentsFileName()); - assertSingleSegmentFile(replica, "segments_5"); + assertSingleSegmentFile(replica); + final SegmentInfos secondCommit = store.readLastCommittedSegmentsInfo(); + assertTrue(secondCommit.getGeneration() > initialCommit.getGeneration()); shards.indexDocs(1); primary.refresh("test"); replicateSegments(primary, shards.getReplicas()); assertDocCount(replica, 2); - assertSingleSegmentFile(replica, "segments_5"); + assertSingleSegmentFile(replica); + assertEquals(store.readLastCommittedSegmentsInfo().getGeneration(), secondCommit.getGeneration()); shards.indexDocs(1); flushShard(primary); replicateSegments(primary, shards.getReplicas()); assertDocCount(replica, 3); - assertSingleSegmentFile(replica, "segments_6"); + assertSingleSegmentFile(replica); + final SegmentInfos thirdCommit = store.readLastCommittedSegmentsInfo(); + assertTrue(thirdCommit.getGeneration() > secondCommit.getGeneration()); final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); assertTrue(diff.missing.isEmpty()); @@ -571,11 +580,10 @@ protected void validateShardIdleWithNoReplicas(IndexShard primary) { assertFalse(primary.hasRefreshPending()); } - private void assertSingleSegmentFile(IndexShard shard, String fileName) throws IOException { + private void assertSingleSegmentFile(IndexShard shard) throws IOException { final Set segmentsFileNames = Arrays.stream(shard.store().directory().listAll()) .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) .collect(Collectors.toSet()); assertEquals("Expected a single segment file", 1, segmentsFileNames.size()); - assertEquals(segmentsFileNames.stream().findFirst().get(), fileName); } } diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 28af8a63cfba8..62dcf54e25578 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -336,12 +336,17 @@ public void testMinScoreDisablesCountOptimization() throws Exception { assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); assertProfileData(context, "MatchAllDocsQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); - assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("score"), 
greaterThanOrEqualTo(1L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); if (executor != null) { - assertThat(query.getTimeBreakdown().get("max_score"), greaterThanOrEqualTo(100L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(100L)); - assertThat(query.getTimeBreakdown().get("avg_score"), greaterThanOrEqualTo(100L)); + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + assertThat(maxScore, greaterThanOrEqualTo(1L)); + assertThat(minScore, greaterThanOrEqualTo(1L)); + assertThat(avgScore, greaterThanOrEqualTo(1L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(1L)); assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(1L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(1L)); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 8490ee4fc39bc..b5ff30deecf5c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -175,6 +175,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import reactor.core.scheduler.Schedulers; + import static java.util.Collections.emptyMap; import static org.opensearch.core.common.util.CollectionUtils.arrayAsArrayList; import static org.hamcrest.Matchers.empty; @@ -225,6 +227,7 @@ public static void resetPortCounter() { @Override public void tearDown() throws Exception { + Schedulers.shutdownNow(); FeatureFlagSetter.clear(); super.tearDown(); }
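The Schedulers.shutdownNow() call added to tearDown() pairs with reactor-core joining the server classpath earlier in this PR: Reactor lazily starts shared daemon scheduler threads the first time a pipeline uses them, and disposing them between tests presumably keeps the framework's thread-leak checks from flagging those threads. A standalone sketch of that lifecycle, using only reactor-core APIs:

    import reactor.core.publisher.Flux;
    import reactor.core.scheduler.Schedulers;

    public final class SchedulersCleanupSketch {
        public static void main(String[] args) {
            // The first pipeline that touches a shared scheduler starts its daemon threads.
            Integer last = Flux.range(1, 3).subscribeOn(Schedulers.boundedElastic()).blockLast();
            System.out.println("last = " + last);
            // Dispose every shared scheduler, as the tearDown() override above does.
            Schedulers.shutdownNow();
        }
    }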